1 | /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- |
2 | */ |
3 | /* |
4 | * |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
6 | * All Rights Reserved. |
7 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a |
9 | * copy of this software and associated documentation files (the |
10 | * "Software"), to deal in the Software without restriction, including |
11 | * without limitation the rights to use, copy, modify, merge, publish, |
12 | * distribute, sub license, and/or sell copies of the Software, and to |
13 | * permit persons to whom the Software is furnished to do so, subject to |
14 | * the following conditions: |
15 | * |
16 | * The above copyright notice and this permission notice (including the |
17 | * next paragraph) shall be included in all copies or substantial portions |
18 | * of the Software. |
19 | * |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
27 | * |
28 | */ |
29 | |
30 | #ifndef _I915_DRV_H_ |
31 | #define _I915_DRV_H_ |
32 | |
33 | #if defined(__NetBSD__) |
34 | #ifdef _KERNEL_OPT |
35 | #if defined(i386) || defined(amd64) |
36 | #include "acpica.h" |
37 | #endif /* i386 || amd64 */ |
38 | #endif /* _KERNEL_OPT */ |
39 | #if (NACPICA > 0) |
40 | #define CONFIG_ACPI |
41 | #endif /* NACPICA > 0 */ |
42 | #endif /* __NetBSD__ */ |
43 | |
44 | #include <uapi/drm/i915_drm.h> |
45 | |
46 | #include "i915_reg.h" |
47 | #include "intel_bios.h" |
48 | #include "intel_ringbuffer.h" |
49 | #include <linux/io-mapping.h> |
50 | #include <linux/i2c.h> |
51 | #include <linux/i2c-algo-bit.h> |
52 | #include <drm/intel-gtt.h> |
53 | #include <linux/backlight.h> |
54 | #include <linux/intel-iommu.h> |
55 | #include <linux/kref.h> |
56 | #include <linux/completion.h> |
57 | #include <linux/shrinker.h> |
58 | #include <linux/pm_qos.h> |
59 | #include <linux/sched.h> |
60 | |
61 | /* General customization: |
62 | */ |
63 | |
64 | #define DRIVER_AUTHOR "Tungsten Graphics, Inc." |
65 | |
66 | #define DRIVER_NAME "i915" |
67 | #define DRIVER_DESC "Intel Graphics" |
68 | #define DRIVER_DATE "20080730" |
69 | |
70 | enum i915_pipe { |
71 | INVALID_PIPE = -1, |
72 | PIPE_A = 0, |
73 | PIPE_B, |
74 | PIPE_C, |
75 | _PIPE_EDP, |
76 | I915_MAX_PIPES = _PIPE_EDP |
77 | }; |
78 | #define pipe_name(p) ((p) + 'A') |
79 | |
80 | enum transcoder { |
81 | TRANSCODER_A = 0, |
82 | TRANSCODER_B, |
83 | TRANSCODER_C, |
84 | TRANSCODER_EDP, |
85 | I915_MAX_TRANSCODERS |
86 | }; |
87 | #define transcoder_name(t) ((t) + 'A') |
88 | |
89 | enum plane { |
90 | PLANE_A = 0, |
91 | PLANE_B, |
92 | PLANE_C, |
93 | }; |
94 | #define plane_name(p) ((p) + 'A') |
95 | |
96 | #define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A') |
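/*
 * Illustrative example (not part of the original interface): on a part with
 * two sprites per pipe, sprite lettering continues per pipe:
 *
 *	sprite_name(PIPE_A, 0) == 'A'	(0*2 + 0 + 'A')
 *	sprite_name(PIPE_B, 1) == 'D'	(1*2 + 1 + 'A')
 *
 * Note the macro assumes a variable named `dev' is in scope for INTEL_INFO().
 */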
97 | |
98 | enum port { |
99 | PORT_A = 0, |
100 | PORT_B, |
101 | PORT_C, |
102 | PORT_D, |
103 | PORT_E, |
104 | I915_MAX_PORTS |
105 | }; |
106 | #define port_name(p) ((p) + 'A') |
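/*
 * Usage sketch (illustrative): the *_name() helpers turn an enum index into
 * a human-readable letter for debug messages, e.g.
 *
 *	DRM_DEBUG_KMS("using pipe %c, port %c\n",
 *		      pipe_name(PIPE_B), port_name(PORT_C));
 *
 * which prints `B' and `C'.
 */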
107 | |
108 | #define I915_NUM_PHYS_VLV 1 |
109 | |
110 | enum dpio_channel { |
111 | DPIO_CH0, |
112 | DPIO_CH1 |
113 | }; |
114 | |
115 | enum dpio_phy { |
116 | DPIO_PHY0, |
117 | DPIO_PHY1 |
118 | }; |
119 | |
120 | enum intel_display_power_domain { |
121 | POWER_DOMAIN_PIPE_A, |
122 | POWER_DOMAIN_PIPE_B, |
123 | POWER_DOMAIN_PIPE_C, |
124 | POWER_DOMAIN_PIPE_A_PANEL_FITTER, |
125 | POWER_DOMAIN_PIPE_B_PANEL_FITTER, |
126 | POWER_DOMAIN_PIPE_C_PANEL_FITTER, |
127 | POWER_DOMAIN_TRANSCODER_A, |
128 | POWER_DOMAIN_TRANSCODER_B, |
129 | POWER_DOMAIN_TRANSCODER_C, |
130 | POWER_DOMAIN_TRANSCODER_EDP, |
131 | POWER_DOMAIN_PORT_DDI_A_2_LANES, |
132 | POWER_DOMAIN_PORT_DDI_A_4_LANES, |
133 | POWER_DOMAIN_PORT_DDI_B_2_LANES, |
134 | POWER_DOMAIN_PORT_DDI_B_4_LANES, |
135 | POWER_DOMAIN_PORT_DDI_C_2_LANES, |
136 | POWER_DOMAIN_PORT_DDI_C_4_LANES, |
137 | POWER_DOMAIN_PORT_DDI_D_2_LANES, |
138 | POWER_DOMAIN_PORT_DDI_D_4_LANES, |
139 | POWER_DOMAIN_PORT_DSI, |
140 | POWER_DOMAIN_PORT_CRT, |
141 | POWER_DOMAIN_PORT_OTHER, |
142 | POWER_DOMAIN_VGA, |
143 | POWER_DOMAIN_AUDIO, |
144 | POWER_DOMAIN_INIT, |
145 | |
146 | POWER_DOMAIN_NUM, |
147 | }; |
148 | |
149 | #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) |
150 | #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ |
151 | ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) |
152 | #define POWER_DOMAIN_TRANSCODER(tran) \ |
153 | ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ |
154 | (tran) + POWER_DOMAIN_TRANSCODER_A) |
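/*
 * Worked examples (these follow directly from the enum layout above):
 *
 *	POWER_DOMAIN_PIPE(PIPE_B)               == POWER_DOMAIN_PIPE_B
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_C)   == POWER_DOMAIN_TRANSCODER_C
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP
 *
 * TRANSCODER_EDP is special-cased so the mapping does not depend on where it
 * sits relative to the per-pipe transcoders in either enum.
 */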
155 | |
156 | enum hpd_pin { |
157 | HPD_NONE = 0, |
158 | HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ |
159 | HPD_TV = HPD_NONE, /* TV is known to be unreliable */ |
160 | HPD_CRT, |
161 | HPD_SDVO_B, |
162 | HPD_SDVO_C, |
163 | HPD_PORT_B, |
164 | HPD_PORT_C, |
165 | HPD_PORT_D, |
166 | HPD_NUM_PINS |
167 | }; |
168 | |
169 | #define I915_GEM_GPU_DOMAINS \ |
170 | (I915_GEM_DOMAIN_RENDER | \ |
171 | I915_GEM_DOMAIN_SAMPLER | \ |
172 | I915_GEM_DOMAIN_COMMAND | \ |
173 | I915_GEM_DOMAIN_INSTRUCTION | \ |
174 | I915_GEM_DOMAIN_VERTEX) |
175 | |
176 | #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) |
177 | #define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) |
178 | |
179 | #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ |
180 | list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ |
181 | if ((intel_encoder)->base.crtc == (__crtc)) |
182 | |
183 | #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ |
184 | list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ |
185 | if ((intel_connector)->base.encoder == (__encoder)) |
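/*
 * Usage sketch (illustrative only): the iterators above are used as ordinary
 * for loops, e.g.
 *
 *	enum i915_pipe pipe;
 *	struct intel_encoder *intel_encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 *		DRM_DEBUG_KMS("encoder found on this crtc\n");
 *
 * for_each_pipe() and for_each_sprite() assume a variable named `dev' is in
 * scope for INTEL_INFO().
 */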
186 | |
187 | struct drm_i915_private; |
188 | |
189 | enum intel_dpll_id { |
190 | DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ |
191 | /* real shared dpll ids must be >= 0 */ |
192 | DPLL_ID_PCH_PLL_A, |
193 | DPLL_ID_PCH_PLL_B, |
194 | }; |
195 | #define I915_NUM_PLLS 2 |
196 | |
197 | struct intel_dpll_hw_state { |
198 | uint32_t dpll; |
199 | uint32_t dpll_md; |
200 | uint32_t fp0; |
201 | uint32_t fp1; |
202 | }; |
203 | |
204 | struct intel_shared_dpll { |
205 | int refcount; /* count of number of CRTCs sharing this PLL */ |
206 | int active; /* count of number of active CRTCs (i.e. DPMS on) */ |
207 | bool on; /* is the PLL actually active? Disabled during modeset */ |
208 | const char *name; |
209 | /* should match the index in the dev_priv->shared_dplls array */ |
210 | enum intel_dpll_id id; |
211 | struct intel_dpll_hw_state hw_state; |
212 | void (*mode_set)(struct drm_i915_private *dev_priv, |
213 | struct intel_shared_dpll *pll); |
214 | void (*enable)(struct drm_i915_private *dev_priv, |
215 | struct intel_shared_dpll *pll); |
216 | void (*disable)(struct drm_i915_private *dev_priv, |
217 | struct intel_shared_dpll *pll); |
218 | bool (*get_hw_state)(struct drm_i915_private *dev_priv, |
219 | struct intel_shared_dpll *pll, |
220 | struct intel_dpll_hw_state *hw_state); |
221 | }; |
222 | |
223 | /* Used by dp and fdi links */ |
224 | struct intel_link_m_n { |
225 | uint32_t tu; |
226 | uint32_t gmch_m; |
227 | uint32_t gmch_n; |
228 | uint32_t link_m; |
229 | uint32_t link_n; |
230 | }; |
231 | |
232 | void intel_link_compute_m_n(int bpp, int nlanes, |
233 | int pixel_clock, int link_clock, |
234 | struct intel_link_m_n *m_n); |
235 | |
236 | struct intel_ddi_plls { |
237 | int spll_refcount; |
238 | int wrpll1_refcount; |
239 | int wrpll2_refcount; |
240 | }; |
241 | |
242 | /* Interface history: |
243 | * |
244 | * 1.1: Original. |
245 | * 1.2: Add Power Management |
246 | * 1.3: Add vblank support |
247 | * 1.4: Fix cmdbuffer path, add heap destroy |
248 | * 1.5: Add vblank pipe configuration |
249 | * 1.6: - New ioctl for scheduling buffer swaps on vertical blank |
250 | * - Support vertical blank on secondary display pipe |
251 | */ |
252 | #define DRIVER_MAJOR 1 |
253 | #define DRIVER_MINOR 6 |
254 | #define DRIVER_PATCHLEVEL 0 |
255 | |
256 | #define WATCH_LISTS 0 |
257 | #define WATCH_GTT 0 |
258 | |
struct opregion_header;
260 | struct opregion_acpi; |
261 | struct opregion_swsci; |
262 | struct opregion_asle; |
263 | |
264 | #ifdef __NetBSD__ /* XXX acpi iomem */ |
265 | # include <linux/acpi_io.h> |
266 | # define __iomem __acpi_iomem |
267 | #endif |
268 | |
269 | struct intel_opregion { |
	struct opregion_header __iomem *header;
271 | struct opregion_acpi __iomem *acpi; |
272 | struct opregion_swsci __iomem *swsci; |
273 | u32 swsci_gbda_sub_functions; |
274 | u32 swsci_sbcb_sub_functions; |
275 | struct opregion_asle __iomem *asle; |
276 | void __iomem *vbt; |
277 | u32 __iomem *lid_state; |
278 | struct work_struct asle_work; |
279 | }; |
280 | #define OPREGION_SIZE (8*1024) |
281 | |
282 | #ifdef __NetBSD__ /* XXX acpi iomem */ |
283 | # undef __iomem |
284 | #endif |
285 | |
286 | struct intel_overlay; |
287 | struct intel_overlay_error_state; |
288 | |
289 | struct drm_i915_master_private { |
290 | drm_local_map_t *sarea; |
291 | struct _drm_i915_sarea *sarea_priv; |
292 | }; |
293 | #define I915_FENCE_REG_NONE -1 |
294 | #define I915_MAX_NUM_FENCES 32 |
295 | /* 32 fences + sign bit for FENCE_REG_NONE */ |
296 | #define I915_MAX_NUM_FENCE_BITS 6 |
297 | |
298 | struct drm_i915_fence_reg { |
299 | struct list_head lru_list; |
300 | struct drm_i915_gem_object *obj; |
301 | int pin_count; |
302 | }; |
303 | |
304 | struct sdvo_device_mapping { |
305 | u8 initialized; |
306 | u8 dvo_port; |
307 | u8 slave_addr; |
308 | u8 dvo_wiring; |
309 | u8 i2c_pin; |
310 | u8 ddc_pin; |
311 | }; |
312 | |
313 | struct intel_display_error_state; |
314 | |
315 | struct drm_i915_error_state { |
316 | struct kref ref; |
317 | struct timeval time; |
318 | |
319 | char error_msg[128]; |
320 | u32 reset_count; |
321 | u32 suspend_count; |
322 | |
323 | /* Generic register state */ |
324 | u32 eir; |
325 | u32 pgtbl_er; |
326 | u32 ier; |
327 | u32 ccid; |
328 | u32 derrmr; |
329 | u32 forcewake; |
330 | u32 error; /* gen6+ */ |
331 | u32 err_int; /* gen7 */ |
332 | u32 done_reg; |
333 | u32 gac_eco; |
334 | u32 gam_ecochk; |
335 | u32 gab_ctl; |
336 | u32 gfx_mode; |
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
338 | u32 pipestat[I915_MAX_PIPES]; |
339 | u64 fence[I915_MAX_NUM_FENCES]; |
340 | struct intel_overlay_error_state *overlay; |
341 | struct intel_display_error_state *display; |
342 | |
343 | struct drm_i915_error_ring { |
344 | bool valid; |
345 | /* Software tracked state */ |
346 | bool waiting; |
347 | int hangcheck_score; |
348 | enum intel_ring_hangcheck_action hangcheck_action; |
349 | int num_requests; |
350 | |
351 | /* our own tracking of ring head and tail */ |
352 | u32 cpu_ring_head; |
353 | u32 cpu_ring_tail; |
354 | |
355 | u32 semaphore_seqno[I915_NUM_RINGS - 1]; |
356 | |
357 | /* Register state */ |
358 | u32 tail; |
359 | u32 head; |
360 | u32 ctl; |
361 | u32 hws; |
362 | u32 ipeir; |
363 | u32 ipehr; |
364 | u32 instdone; |
365 | u32 bbstate; |
366 | u32 instpm; |
367 | u32 instps; |
368 | u32 seqno; |
369 | u64 bbaddr; |
370 | u64 acthd; |
371 | u32 fault_reg; |
372 | u32 faddr; |
373 | u32 rc_psmi; /* sleep state */ |
374 | u32 semaphore_mboxes[I915_NUM_RINGS - 1]; |
375 | |
376 | struct drm_i915_error_object { |
377 | int page_count; |
378 | u32 gtt_offset; |
379 | u32 *pages[0]; |
380 | } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; |
381 | |
382 | struct drm_i915_error_request { |
383 | long jiffies; |
384 | u32 seqno; |
385 | u32 tail; |
386 | } *requests; |
387 | |
388 | struct { |
389 | u32 gfx_mode; |
390 | union { |
391 | u64 pdp[4]; |
392 | u32 pp_dir_base; |
393 | }; |
394 | } vm_info; |
395 | |
396 | pid_t pid; |
397 | char comm[TASK_COMM_LEN]; |
398 | } ring[I915_NUM_RINGS]; |
399 | struct drm_i915_error_buffer { |
400 | u32 size; |
401 | u32 name; |
402 | u32 rseqno, wseqno; |
403 | u32 gtt_offset; |
404 | u32 read_domains; |
405 | u32 write_domain; |
406 | s32 fence_reg:I915_MAX_NUM_FENCE_BITS; |
407 | s32 pinned:2; |
408 | u32 tiling:2; |
409 | u32 dirty:1; |
410 | u32 purgeable:1; |
411 | s32 ring:4; |
412 | u32 cache_level:3; |
413 | } **active_bo, **pinned_bo; |
414 | |
415 | u32 *active_bo_count, *pinned_bo_count; |
416 | }; |
417 | |
418 | struct intel_connector; |
419 | struct intel_crtc_config; |
420 | struct intel_plane_config; |
421 | struct intel_crtc; |
422 | struct intel_limit; |
423 | struct dpll; |
424 | |
425 | struct drm_i915_display_funcs { |
426 | bool (*fbc_enabled)(struct drm_device *dev); |
427 | void (*enable_fbc)(struct drm_crtc *crtc); |
428 | void (*disable_fbc)(struct drm_device *dev); |
429 | int (*get_display_clock_speed)(struct drm_device *dev); |
430 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
431 | /** |
432 | * find_dpll() - Find the best values for the PLL |
433 | * @limit: limits for the PLL |
434 | * @crtc: current CRTC |
435 | * @target: target frequency in kHz |
436 | * @refclk: reference clock frequency in kHz |
	 * @match_clock: if provided, @best_clock's P divider must
	 *               match the P divider from @match_clock;
	 *               used for LVDS downclocking
440 | * @best_clock: best PLL values found |
441 | * |
442 | * Returns true on success, false on failure. |
443 | */ |
444 | bool (*find_dpll)(const struct intel_limit *limit, |
445 | struct drm_crtc *crtc, |
446 | int target, int refclk, |
447 | struct dpll *match_clock, |
448 | struct dpll *best_clock); |
449 | void (*update_wm)(struct drm_crtc *crtc); |
450 | void (*update_sprite_wm)(struct drm_plane *plane, |
451 | struct drm_crtc *crtc, |
452 | uint32_t sprite_width, int pixel_size, |
453 | bool enable, bool scaled); |
454 | void (*modeset_global_resources)(struct drm_device *dev); |
455 | /* Returns the active state of the crtc, and if the crtc is active, |
456 | * fills out the pipe-config with the hw state. */ |
457 | bool (*get_pipe_config)(struct intel_crtc *, |
458 | struct intel_crtc_config *); |
459 | void (*get_plane_config)(struct intel_crtc *, |
460 | struct intel_plane_config *); |
461 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
462 | int x, int y, |
463 | struct drm_framebuffer *old_fb); |
464 | void (*crtc_enable)(struct drm_crtc *crtc); |
465 | void (*crtc_disable)(struct drm_crtc *crtc); |
466 | void (*off)(struct drm_crtc *crtc); |
467 | void (*write_eld)(struct drm_connector *connector, |
468 | struct drm_crtc *crtc, |
469 | struct drm_display_mode *mode); |
470 | void (*fdi_link_train)(struct drm_crtc *crtc); |
471 | void (*init_clock_gating)(struct drm_device *dev); |
472 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
473 | struct drm_framebuffer *fb, |
474 | struct drm_i915_gem_object *obj, |
475 | uint32_t flags); |
476 | int (*update_primary_plane)(struct drm_crtc *crtc, |
477 | struct drm_framebuffer *fb, |
478 | int x, int y); |
479 | void (*hpd_irq_setup)(struct drm_device *dev); |
480 | /* clock updates for mode set */ |
481 | /* cursor updates */ |
482 | /* render clock increase/decrease */ |
483 | /* display clock increase/decrease */ |
484 | /* pll clock increase/decrease */ |
485 | |
486 | int (*setup_backlight)(struct intel_connector *connector); |
487 | uint32_t (*get_backlight)(struct intel_connector *connector); |
488 | void (*set_backlight)(struct intel_connector *connector, |
489 | uint32_t level); |
490 | void (*disable_backlight)(struct intel_connector *connector); |
491 | void (*enable_backlight)(struct intel_connector *connector); |
492 | }; |
493 | |
494 | struct intel_uncore_funcs { |
495 | void (*force_wake_get)(struct drm_i915_private *dev_priv, |
496 | int fw_engine); |
497 | void (*force_wake_put)(struct drm_i915_private *dev_priv, |
498 | int fw_engine); |
499 | |
500 | uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
501 | uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
502 | uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
503 | uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
504 | |
505 | void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, |
506 | uint8_t val, bool trace); |
507 | void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, |
508 | uint16_t val, bool trace); |
509 | void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, |
510 | uint32_t val, bool trace); |
511 | void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, |
512 | uint64_t val, bool trace); |
513 | }; |
514 | |
515 | struct intel_uncore { |
516 | spinlock_t lock; /** lock is also taken in irq contexts. */ |
517 | |
518 | struct intel_uncore_funcs funcs; |
519 | |
520 | unsigned fifo_count; |
521 | unsigned forcewake_count; |
522 | |
523 | unsigned fw_rendercount; |
524 | unsigned fw_mediacount; |
525 | |
526 | struct timer_list force_wake_timer; |
527 | }; |
528 | |
529 | #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ |
530 | func(is_mobile) sep \ |
531 | func(is_i85x) sep \ |
532 | func(is_i915g) sep \ |
533 | func(is_i945gm) sep \ |
534 | func(is_g33) sep \ |
535 | func(need_gfx_hws) sep \ |
536 | func(is_g4x) sep \ |
537 | func(is_pineview) sep \ |
538 | func(is_broadwater) sep \ |
539 | func(is_crestline) sep \ |
540 | func(is_ivybridge) sep \ |
541 | func(is_valleyview) sep \ |
542 | func(is_haswell) sep \ |
543 | func(is_preliminary) sep \ |
544 | func(has_fbc) sep \ |
545 | func(has_pipe_cxsr) sep \ |
546 | func(has_hotplug) sep \ |
547 | func(cursor_needs_physical) sep \ |
548 | func(has_overlay) sep \ |
549 | func(overlay_needs_physical) sep \ |
550 | func(supports_tv) sep \ |
551 | func(has_llc) sep \ |
552 | func(has_ddi) sep \ |
553 | func(has_fpga_dbg) |
554 | |
555 | #define DEFINE_FLAG(name) u8 name:1 |
556 | #define SEP_SEMICOLON ; |
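/*
 * Illustrative expansion: within struct intel_device_info below,
 * DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON) expands to one
 * single-bit field per flag, roughly
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1
 *
 * keeping the flag list in one place; callers can re-expand it with
 * different func/sep arguments (e.g. to print each flag in debugfs).
 */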
557 | |
558 | struct intel_device_info { |
559 | u32 display_mmio_offset; |
560 | u8 num_pipes:3; |
561 | u8 num_sprites[I915_MAX_PIPES]; |
562 | u8 gen; |
563 | u8 ring_mask; /* Rings supported by the HW */ |
564 | DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); |
565 | /* Register offsets for the various display pipes and transcoders */ |
566 | int pipe_offsets[I915_MAX_TRANSCODERS]; |
567 | int trans_offsets[I915_MAX_TRANSCODERS]; |
568 | int dpll_offsets[I915_MAX_PIPES]; |
569 | int dpll_md_offsets[I915_MAX_PIPES]; |
570 | int palette_offsets[I915_MAX_PIPES]; |
571 | }; |
572 | |
573 | #undef DEFINE_FLAG |
574 | #undef SEP_SEMICOLON |
575 | |
576 | enum i915_cache_level { |
577 | I915_CACHE_NONE = 0, |
578 | I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ |
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
583 | I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ |
584 | }; |
585 | |
586 | typedef uint32_t gen6_gtt_pte_t; |
587 | |
588 | /** |
589 | * A VMA represents a GEM BO that is bound into an address space. Therefore, a |
590 | * VMA's presence cannot be guaranteed before binding, or after unbinding the |
591 | * object into/from the address space. |
592 | * |
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
595 | */ |
596 | struct i915_vma { |
597 | struct drm_mm_node node; |
598 | struct drm_i915_gem_object *obj; |
599 | struct i915_address_space *vm; |
600 | |
601 | /** This object's place on the active/inactive lists */ |
602 | struct list_head mm_list; |
603 | |
604 | struct list_head vma_link; /* Link in the object's VMA list */ |
605 | |
606 | /** This vma's place in the batchbuffer or on the eviction list */ |
607 | struct list_head exec_list; |
608 | |
609 | /** |
610 | * Used for performing relocations during execbuffer insertion. |
611 | */ |
612 | struct hlist_node exec_node; |
613 | unsigned long exec_handle; |
614 | struct drm_i915_gem_exec_object2 *exec_entry; |
615 | |
616 | /** |
617 | * How many users have pinned this object in GTT space. The following |
618 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
619 | * (via user_pin_count), execbuffer (objects are not allowed multiple |
620 | * times for the same batchbuffer), and the framebuffer code. When |
621 | * switching/pageflipping, the framebuffer code has at most two buffers |
622 | * pinned per crtc. |
623 | * |
624 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 |
625 | * bits with absolutely no headroom. So use 4 bits. */ |
626 | unsigned int pin_count:4; |
627 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
628 | |
629 | /** Unmap an object from an address space. This usually consists of |
630 | * setting the valid PTE entries to a reserved scratch page. */ |
631 | void (*unbind_vma)(struct i915_vma *vma); |
632 | /* Map an object into an address space with the given cache flags. */ |
633 | #define GLOBAL_BIND (1<<0) |
634 | void (*bind_vma)(struct i915_vma *vma, |
635 | enum i915_cache_level cache_level, |
636 | u32 flags); |
637 | }; |
638 | |
639 | struct i915_address_space { |
640 | struct drm_mm mm; |
641 | struct drm_device *dev; |
642 | struct list_head global_link; |
643 | unsigned long start; /* Start offset always 0 for dri2 */ |
644 | size_t total; /* size addr space maps (ex. 2GB for ggtt) */ |
645 | |
646 | struct { |
647 | dma_addr_t addr; |
648 | #ifdef __NetBSD__ |
649 | bus_dma_segment_t seg; |
650 | bus_dmamap_t map; |
651 | #else |
652 | struct page *page; |
653 | #endif |
654 | } scratch; |
655 | |
656 | /** |
657 | * List of objects currently involved in rendering. |
658 | * |
659 | * Includes buffers having the contents of their GPU caches |
660 | * flushed, not necessarily primitives. last_rendering_seqno |
661 | * represents when the rendering involved will be completed. |
662 | * |
663 | * A reference is held on the buffer while on this list. |
664 | */ |
665 | struct list_head active_list; |
666 | |
667 | /** |
668 | * LRU list of objects which are not in the ringbuffer and |
669 | * are ready to unbind, but are still in the GTT. |
670 | * |
671 | * last_rendering_seqno is 0 while an object is in this list. |
672 | * |
673 | * A reference is not held on the buffer while on this list, |
674 | * as merely being GTT-bound shouldn't prevent its being |
675 | * freed, and we'll pull it off the list in the free path. |
676 | */ |
677 | struct list_head inactive_list; |
678 | |
679 | /* FIXME: Need a more generic return type */ |
680 | gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, |
681 | enum i915_cache_level level, |
682 | bool valid); /* Create a valid PTE */ |
683 | void (*clear_range)(struct i915_address_space *vm, |
684 | uint64_t start, |
685 | uint64_t length, |
686 | bool use_scratch); |
687 | void (*insert_entries)(struct i915_address_space *vm, |
688 | #ifdef __NetBSD__ |
689 | bus_dmamap_t dmamap, |
690 | #else |
691 | struct sg_table *st, |
692 | #endif |
693 | uint64_t start, |
694 | enum i915_cache_level cache_level); |
695 | void (*cleanup)(struct i915_address_space *vm); |
696 | }; |
697 | |
698 | #ifdef __NetBSD__ |
699 | # define __gtt_iomem |
700 | # define __iomem __gtt_iomem |
701 | #endif |
702 | |
703 | /* The Graphics Translation Table is the way in which GEN hardware translates a |
704 | * Graphics Virtual Address into a Physical Address. In addition to the normal |
 * collateral associated with any va->pa translations, GEN hardware also has a
706 | * portion of the GTT which can be mapped by the CPU and remain both coherent |
707 | * and correct (in cases like swizzling). That region is referred to as GMADR in |
708 | * the spec. |
709 | */ |
710 | struct i915_gtt { |
711 | struct i915_address_space base; |
712 | size_t stolen_size; /* Total size of stolen memory */ |
713 | |
714 | unsigned long mappable_end; /* End offset that we can CPU map */ |
715 | struct io_mapping *mappable; /* Mapping to our CPU mappable region */ |
716 | phys_addr_t mappable_base; /* PA of our GMADR */ |
717 | |
718 | /** "Graphics Stolen Memory" holds the global PTEs */ |
719 | #ifdef __NetBSD__ |
720 | /* |
721 | * This is not actually the `Graphics Stolen Memory'; it is the |
722 | * graphics translation table, which we write to through the |
723 | * GTTADR/GTTMMADR PCI BAR, and which is backed by `Graphics |
724 | * GTT Stolen Memory'. That isn't the `Graphics Stolen Memory' |
725 | * either, although it is stolen from main memory. |
726 | */ |
727 | bus_space_tag_t bst; |
728 | bus_space_handle_t bsh; |
729 | bus_size_t size; |
730 | |
731 | /* Maximum physical address that can be wired into a GTT entry. */ |
732 | uint64_t max_paddr; |
733 | |
734 | /* Page freelist for pages limited to the above maximum address. */ |
735 | int pgfl; |
736 | #else |
737 | void __iomem *gsm; |
738 | #endif |
739 | |
740 | bool do_idle_maps; |
741 | |
742 | int mtrr; |
743 | |
744 | /* global gtt ops */ |
745 | int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, |
746 | size_t *stolen, phys_addr_t *mappable_base, |
747 | unsigned long *mappable_end); |
748 | }; |
749 | #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) |
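/*
 * Worked example (illustrative): with a 2GB global GTT and 4096-byte pages
 * (PAGE_SHIFT == 12), gtt_total_entries() == (2 << 30) >> 12 == 524288 PTEs.
 */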
750 | |
751 | #ifdef __NetBSD__ |
752 | # undef __iomem |
753 | # undef __gtt_iomem |
754 | #endif |
755 | |
756 | #define GEN8_LEGACY_PDPS 4 |
757 | struct i915_hw_ppgtt { |
758 | struct i915_address_space base; |
759 | struct kref ref; |
760 | struct drm_mm_node node; |
761 | unsigned num_pd_entries; |
762 | #ifdef __NetBSD__ |
763 | union { |
764 | struct { |
765 | unsigned npdp; |
766 | bus_dma_segment_t pd_segs[GEN8_LEGACY_PDPS]; |
767 | bus_dmamap_t pd_map; |
768 | struct { |
769 | /* XXX Should be GEN8_PDES_PER_PAGE. */ |
770 | bus_dma_segment_t pt_segs[PAGE_SIZE/8]; |
771 | bus_dmamap_t pt_map; |
772 | } pd[GEN8_LEGACY_PDPS]; |
773 | } *gen8; |
774 | struct { |
775 | bus_size_t pd_base; |
776 | bus_dma_segment_t *pt_segs; /* num_pd_entries */ |
777 | bus_dmamap_t pt_map; |
778 | } *gen6; |
779 | } u; |
780 | #else |
781 | unsigned num_pd_pages; /* gen8+ */ |
782 | union { |
783 | struct page **pt_pages; |
784 | struct page **gen8_pt_pages[GEN8_LEGACY_PDPS]; |
785 | }; |
786 | struct page *pd_pages; |
787 | union { |
788 | uint32_t pd_offset; |
789 | dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS]; |
790 | }; |
791 | union { |
792 | dma_addr_t *pt_dma_addr; |
793 | dma_addr_t *gen8_pt_dma_addr[4]; |
794 | }; |
795 | #endif |
796 | |
797 | struct i915_hw_context *ctx; |
798 | |
799 | int (*enable)(struct i915_hw_ppgtt *ppgtt); |
800 | int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, |
801 | struct intel_ring_buffer *ring, |
802 | bool synchronous); |
803 | void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); |
804 | }; |
805 | |
806 | struct i915_ctx_hang_stats { |
807 | /* This context had batch pending when hang was declared */ |
808 | unsigned batch_pending; |
809 | |
810 | /* This context had batch active when hang was declared */ |
811 | unsigned batch_active; |
812 | |
813 | /* Time when this context was last blamed for a GPU reset */ |
814 | unsigned long guilty_ts; |
815 | |
	/* This context is banned from submitting more work */
817 | bool banned; |
818 | }; |
819 | |
820 | /* This must match up with the value previously used for execbuf2.rsvd1. */ |
821 | #define DEFAULT_CONTEXT_ID 0 |
822 | struct i915_hw_context { |
823 | struct kref ref; |
824 | int id; |
825 | bool is_initialized; |
826 | uint8_t remap_slice; |
827 | struct drm_i915_file_private *file_priv; |
828 | struct intel_ring_buffer *last_ring; |
829 | struct drm_i915_gem_object *obj; |
830 | struct i915_ctx_hang_stats hang_stats; |
831 | struct i915_address_space *vm; |
832 | |
833 | struct list_head link; |
834 | }; |
835 | |
836 | struct i915_fbc { |
837 | unsigned long size; |
838 | unsigned int fb_id; |
839 | enum plane plane; |
840 | int y; |
841 | |
842 | struct drm_mm_node *compressed_fb; |
843 | struct drm_mm_node *compressed_llb; |
844 | |
845 | struct intel_fbc_work { |
846 | struct delayed_work work; |
847 | struct drm_crtc *crtc; |
848 | struct drm_framebuffer *fb; |
849 | } *fbc_work; |
850 | |
851 | enum no_fbc_reason { |
852 | FBC_OK, /* FBC is enabled */ |
853 | FBC_UNSUPPORTED, /* FBC is not supported by this chipset */ |
854 | FBC_NO_OUTPUT, /* no outputs enabled to compress */ |
855 | FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */ |
856 | FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ |
857 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ |
858 | FBC_BAD_PLANE, /* fbc not supported on plane */ |
859 | FBC_NOT_TILED, /* buffer not tiled */ |
860 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ |
861 | FBC_MODULE_PARAM, |
862 | FBC_CHIP_DEFAULT, /* disabled by default on this chip */ |
863 | } no_fbc_reason; |
864 | }; |
865 | |
866 | struct i915_psr { |
867 | bool sink_support; |
868 | bool source_ok; |
869 | }; |
870 | |
871 | enum intel_pch { |
872 | PCH_NONE = 0, /* No PCH present */ |
873 | PCH_IBX, /* Ibexpeak PCH */ |
874 | PCH_CPT, /* Cougarpoint PCH */ |
875 | PCH_LPT, /* Lynxpoint PCH */ |
876 | PCH_NOP, |
877 | }; |
878 | |
879 | enum intel_sbi_destination { |
880 | SBI_ICLK, |
881 | SBI_MPHY, |
882 | }; |
883 | |
884 | #define QUIRK_PIPEA_FORCE (1<<0) |
885 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
886 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
887 | |
888 | struct intel_fbdev; |
889 | struct intel_fbc_work; |
890 | |
891 | struct intel_gmbus { |
892 | struct i2c_adapter adapter; |
893 | u32 force_bit; |
894 | u32 reg0; |
895 | u32 gpio_reg; |
896 | struct i2c_algo_bit_data bit_algo; |
897 | struct drm_i915_private *dev_priv; |
898 | }; |
899 | |
900 | struct i915_suspend_saved_registers { |
901 | u8 saveLBB; |
902 | u32 saveDSPACNTR; |
903 | u32 saveDSPBCNTR; |
904 | u32 saveDSPARB; |
905 | u32 savePIPEACONF; |
906 | u32 savePIPEBCONF; |
907 | u32 savePIPEASRC; |
908 | u32 savePIPEBSRC; |
909 | u32 saveFPA0; |
910 | u32 saveFPA1; |
911 | u32 saveDPLL_A; |
912 | u32 saveDPLL_A_MD; |
913 | u32 saveHTOTAL_A; |
914 | u32 saveHBLANK_A; |
915 | u32 saveHSYNC_A; |
916 | u32 saveVTOTAL_A; |
917 | u32 saveVBLANK_A; |
918 | u32 saveVSYNC_A; |
919 | u32 saveBCLRPAT_A; |
920 | u32 saveTRANSACONF; |
921 | u32 saveTRANS_HTOTAL_A; |
922 | u32 saveTRANS_HBLANK_A; |
923 | u32 saveTRANS_HSYNC_A; |
924 | u32 saveTRANS_VTOTAL_A; |
925 | u32 saveTRANS_VBLANK_A; |
926 | u32 saveTRANS_VSYNC_A; |
927 | u32 savePIPEASTAT; |
928 | u32 saveDSPASTRIDE; |
929 | u32 saveDSPASIZE; |
930 | u32 saveDSPAPOS; |
931 | u32 saveDSPAADDR; |
932 | u32 saveDSPASURF; |
933 | u32 saveDSPATILEOFF; |
934 | u32 savePFIT_PGM_RATIOS; |
935 | u32 saveBLC_HIST_CTL; |
936 | u32 saveBLC_PWM_CTL; |
937 | u32 saveBLC_PWM_CTL2; |
938 | u32 saveBLC_HIST_CTL_B; |
939 | u32 saveBLC_CPU_PWM_CTL; |
940 | u32 saveBLC_CPU_PWM_CTL2; |
941 | u32 saveFPB0; |
942 | u32 saveFPB1; |
943 | u32 saveDPLL_B; |
944 | u32 saveDPLL_B_MD; |
945 | u32 saveHTOTAL_B; |
946 | u32 saveHBLANK_B; |
947 | u32 saveHSYNC_B; |
948 | u32 saveVTOTAL_B; |
949 | u32 saveVBLANK_B; |
950 | u32 saveVSYNC_B; |
951 | u32 saveBCLRPAT_B; |
952 | u32 saveTRANSBCONF; |
953 | u32 saveTRANS_HTOTAL_B; |
954 | u32 saveTRANS_HBLANK_B; |
955 | u32 saveTRANS_HSYNC_B; |
956 | u32 saveTRANS_VTOTAL_B; |
957 | u32 saveTRANS_VBLANK_B; |
958 | u32 saveTRANS_VSYNC_B; |
959 | u32 savePIPEBSTAT; |
960 | u32 saveDSPBSTRIDE; |
961 | u32 saveDSPBSIZE; |
962 | u32 saveDSPBPOS; |
963 | u32 saveDSPBADDR; |
964 | u32 saveDSPBSURF; |
965 | u32 saveDSPBTILEOFF; |
966 | u32 saveVGA0; |
967 | u32 saveVGA1; |
968 | u32 saveVGA_PD; |
969 | u32 saveVGACNTRL; |
970 | u32 saveADPA; |
971 | u32 saveLVDS; |
972 | u32 savePP_ON_DELAYS; |
973 | u32 savePP_OFF_DELAYS; |
974 | u32 saveDVOA; |
975 | u32 saveDVOB; |
976 | u32 saveDVOC; |
977 | u32 savePP_ON; |
978 | u32 savePP_OFF; |
979 | u32 savePP_CONTROL; |
980 | u32 savePP_DIVISOR; |
981 | u32 savePFIT_CONTROL; |
982 | u32 save_palette_a[256]; |
983 | u32 save_palette_b[256]; |
984 | u32 saveFBC_CONTROL; |
985 | u32 saveIER; |
986 | u32 saveIIR; |
987 | u32 saveIMR; |
988 | u32 saveDEIER; |
989 | u32 saveDEIMR; |
990 | u32 saveGTIER; |
991 | u32 saveGTIMR; |
992 | u32 saveFDI_RXA_IMR; |
993 | u32 saveFDI_RXB_IMR; |
994 | u32 saveCACHE_MODE_0; |
995 | u32 saveMI_ARB_STATE; |
996 | u32 saveSWF0[16]; |
997 | u32 saveSWF1[16]; |
998 | u32 saveSWF2[3]; |
999 | u8 saveMSR; |
1000 | u8 saveSR[8]; |
1001 | u8 saveGR[25]; |
1002 | u8 saveAR_INDEX; |
1003 | u8 saveAR[21]; |
1004 | u8 saveDACMASK; |
1005 | u8 saveCR[37]; |
1006 | uint64_t saveFENCE[I915_MAX_NUM_FENCES]; |
1007 | u32 saveCURACNTR; |
1008 | u32 saveCURAPOS; |
1009 | u32 saveCURABASE; |
1010 | u32 saveCURBCNTR; |
1011 | u32 saveCURBPOS; |
1012 | u32 saveCURBBASE; |
1013 | u32 saveCURSIZE; |
1014 | u32 saveDP_B; |
1015 | u32 saveDP_C; |
1016 | u32 saveDP_D; |
1017 | u32 savePIPEA_GMCH_DATA_M; |
1018 | u32 savePIPEB_GMCH_DATA_M; |
1019 | u32 savePIPEA_GMCH_DATA_N; |
1020 | u32 savePIPEB_GMCH_DATA_N; |
1021 | u32 savePIPEA_DP_LINK_M; |
1022 | u32 savePIPEB_DP_LINK_M; |
1023 | u32 savePIPEA_DP_LINK_N; |
1024 | u32 savePIPEB_DP_LINK_N; |
1025 | u32 saveFDI_RXA_CTL; |
1026 | u32 saveFDI_TXA_CTL; |
1027 | u32 saveFDI_RXB_CTL; |
1028 | u32 saveFDI_TXB_CTL; |
1029 | u32 savePFA_CTL_1; |
1030 | u32 savePFB_CTL_1; |
1031 | u32 savePFA_WIN_SZ; |
1032 | u32 savePFB_WIN_SZ; |
1033 | u32 savePFA_WIN_POS; |
1034 | u32 savePFB_WIN_POS; |
1035 | u32 savePCH_DREF_CONTROL; |
1036 | u32 saveDISP_ARB_CTL; |
1037 | u32 savePIPEA_DATA_M1; |
1038 | u32 savePIPEA_DATA_N1; |
1039 | u32 savePIPEA_LINK_M1; |
1040 | u32 savePIPEA_LINK_N1; |
1041 | u32 savePIPEB_DATA_M1; |
1042 | u32 savePIPEB_DATA_N1; |
1043 | u32 savePIPEB_LINK_M1; |
1044 | u32 savePIPEB_LINK_N1; |
1045 | u32 saveMCHBAR_RENDER_STANDBY; |
1046 | u32 savePCH_PORT_HOTPLUG; |
1047 | }; |
1048 | |
1049 | struct intel_gen6_power_mgmt { |
1050 | /* work and pm_iir are protected by dev_priv->irq_lock */ |
1051 | struct work_struct work; |
1052 | u32 pm_iir; |
1053 | |
1054 | /* Frequencies are stored in potentially platform dependent multiples. |
1055 | * In other words, *_freq needs to be multiplied by X to be interesting. |
1056 | * Soft limits are those which are used for the dynamic reclocking done |
1057 | * by the driver (raise frequencies under heavy loads, and lower for |
1058 | * lighter loads). Hard limits are those imposed by the hardware. |
1059 | * |
1060 | * A distinction is made for overclocking, which is never enabled by |
1061 | * default, and is considered to be above the hard limit if it's |
1062 | * possible at all. |
1063 | */ |
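	/*
	 * Illustrative example (platform scaling assumed, not defined here):
	 * on several gen6+ parts the multiple is 50MHz, so a cur_freq of 20
	 * would correspond to a 1000MHz GPU clock.
	 */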
1064 | u8 cur_freq; /* Current frequency (cached, may not == HW) */ |
1065 | u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ |
1066 | u8 max_freq_softlimit; /* Max frequency permitted by the driver */ |
1067 | u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ |
1068 | u8 min_freq; /* AKA RPn. Minimum frequency */ |
1069 | u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ |
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
1071 | u8 rp0_freq; /* Non-overclocked max frequency. */ |
1072 | |
1073 | int last_adj; |
1074 | enum { LOW_POWER, BETWEEN, HIGH_POWER } power; |
1075 | |
1076 | bool enabled; |
1077 | struct delayed_work delayed_resume_work; |
1078 | |
1079 | /* |
1080 | * Protects RPS/RC6 register access and PCU communication. |
1081 | * Must be taken after struct_mutex if nested. |
1082 | */ |
1083 | struct mutex hw_lock; |
1084 | }; |
1085 | |
/* defined in intel_pm.c */
1087 | extern spinlock_t mchdev_lock; |
1088 | |
1089 | struct intel_ilk_power_mgmt { |
1090 | u8 cur_delay; |
1091 | u8 min_delay; |
1092 | u8 max_delay; |
1093 | u8 fmax; |
1094 | u8 fstart; |
1095 | |
1096 | u64 last_count1; |
1097 | unsigned long last_time1; |
1098 | unsigned long chipset_power; |
1099 | u64 last_count2; |
1100 | struct timespec last_time2; |
1101 | unsigned long gfx_power; |
1102 | u8 corr; |
1103 | |
1104 | int c_m; |
1105 | int r_t; |
1106 | |
1107 | struct drm_i915_gem_object *pwrctx; |
1108 | struct drm_i915_gem_object *renderctx; |
1109 | }; |
1110 | |
1111 | struct drm_i915_private; |
1112 | struct i915_power_well; |
1113 | |
1114 | struct i915_power_well_ops { |
1115 | /* |
1116 | * Synchronize the well's hw state to match the current sw state, for |
1117 | * example enable/disable it based on the current refcount. Called |
1118 | * during driver init and resume time, possibly after first calling |
1119 | * the enable/disable handlers. |
1120 | */ |
1121 | void (*sync_hw)(struct drm_i915_private *dev_priv, |
1122 | struct i915_power_well *power_well); |
1123 | /* |
1124 | * Enable the well and resources that depend on it (for example |
1125 | * interrupts located on the well). Called after the 0->1 refcount |
1126 | * transition. |
1127 | */ |
1128 | void (*enable)(struct drm_i915_private *dev_priv, |
1129 | struct i915_power_well *power_well); |
1130 | /* |
1131 | * Disable the well and resources that depend on it. Called after |
1132 | * the 1->0 refcount transition. |
1133 | */ |
1134 | void (*disable)(struct drm_i915_private *dev_priv, |
1135 | struct i915_power_well *power_well); |
1136 | /* Returns the hw enabled state. */ |
1137 | bool (*is_enabled)(struct drm_i915_private *dev_priv, |
1138 | struct i915_power_well *power_well); |
1139 | }; |
1140 | |
1141 | /* Power well structure for haswell */ |
1142 | struct i915_power_well { |
1143 | const char *name; |
1144 | bool always_on; |
1145 | /* power well enable/disable usage count */ |
1146 | int count; |
1147 | unsigned long domains; |
1148 | unsigned long data; |
1149 | const struct i915_power_well_ops *ops; |
1150 | }; |
1151 | |
1152 | struct i915_power_domains { |
1153 | /* |
1154 | * Power wells needed for initialization at driver init and suspend |
1155 | * time are on. They are kept on until after the first modeset. |
1156 | */ |
1157 | bool init_power_on; |
1158 | int power_well_count; |
1159 | |
1160 | struct mutex lock; |
1161 | int domain_use_count[POWER_DOMAIN_NUM]; |
1162 | struct i915_power_well *power_wells; |
1163 | }; |
1164 | |
1165 | struct i915_dri1_state { |
1166 | unsigned allow_batchbuffer : 1; |
1167 | #ifdef __NetBSD__ |
1168 | bus_space_handle_t gfx_hws_cpu_bsh; |
1169 | #else |
1170 | u32 __iomem *gfx_hws_cpu_addr; |
1171 | #endif |
1172 | |
1173 | unsigned int cpp; |
1174 | int back_offset; |
1175 | int front_offset; |
1176 | int current_page; |
1177 | int page_flipping; |
1178 | |
1179 | uint32_t counter; |
1180 | }; |
1181 | |
1182 | struct i915_ums_state { |
1183 | /** |
1184 | * Flag if the X Server, and thus DRM, is not currently in |
1185 | * control of the device. |
1186 | * |
1187 | * This is set between LeaveVT and EnterVT. It needs to be |
1188 | * replaced with a semaphore. It also needs to be |
1189 | * transitioned away from for kernel modesetting. |
1190 | */ |
1191 | int mm_suspended; |
1192 | }; |
1193 | |
1194 | #define MAX_L3_SLICES 2 |
1195 | struct intel_l3_parity { |
1196 | u32 *remap_info[MAX_L3_SLICES]; |
1197 | struct work_struct error_work; |
1198 | int which_slice; |
1199 | }; |
1200 | |
1201 | struct i915_gem_mm { |
1202 | /** Memory allocator for GTT stolen memory */ |
1203 | struct drm_mm stolen; |
1204 | /** List of all objects in gtt_space. Used to restore gtt |
1205 | * mappings on resume */ |
1206 | struct list_head bound_list; |
1207 | /** |
1208 | * List of objects which are not bound to the GTT (thus |
1209 | * are idle and not used by the GPU) but still have |
1210 | * (presumably uncached) pages still attached. |
1211 | */ |
1212 | struct list_head unbound_list; |
1213 | |
1214 | /** Usable portion of the GTT for GEM */ |
1215 | unsigned long stolen_base; /* limited to low memory (32-bit) */ |
1216 | |
1217 | /** PPGTT used for aliasing the PPGTT with the GTT */ |
1218 | struct i915_hw_ppgtt *aliasing_ppgtt; |
1219 | |
1220 | struct shrinker inactive_shrinker; |
1221 | bool shrinker_no_lock_stealing; |
1222 | |
1223 | /** LRU list of objects with fence regs on them. */ |
1224 | struct list_head fence_list; |
1225 | |
1226 | /** |
1227 | * We leave the user IRQ off as much as possible, |
1228 | * but this means that requests will finish and never |
1229 | * be retired once the system goes idle. Set a timer to |
1230 | * fire periodically while the ring is running. When it |
1231 | * fires, go retire requests. |
1232 | */ |
1233 | struct delayed_work retire_work; |
1234 | |
1235 | /** |
1236 | * When we detect an idle GPU, we want to turn on |
1237 | * powersaving features. So once we see that there |
1238 | * are no more requests outstanding and no more |
1239 | * arrive within a small period of time, we fire |
1240 | * off the idle_work. |
1241 | */ |
1242 | struct delayed_work idle_work; |
1243 | |
1244 | /** |
1245 | * Are we in a non-interruptible section of code like |
1246 | * modesetting? |
1247 | */ |
1248 | bool interruptible; |
1249 | |
1250 | /** |
1251 | * Is the GPU currently considered idle, or busy executing userspace |
1252 | * requests? Whilst idle, we attempt to power down the hardware and |
1253 | * display clocks. In order to reduce the effect on performance, there |
1254 | * is a slight delay before we do so. |
1255 | */ |
1256 | bool busy; |
1257 | |
1258 | /** Bit 6 swizzling required for X tiling */ |
1259 | uint32_t bit_6_swizzle_x; |
1260 | /** Bit 6 swizzling required for Y tiling */ |
1261 | uint32_t bit_6_swizzle_y; |
1262 | |
1263 | /* accounting, useful for userland debugging */ |
1264 | spinlock_t object_stat_lock; |
1265 | size_t object_memory; |
1266 | u32 object_count; |
1267 | }; |
1268 | |
1269 | struct drm_i915_error_state_buf { |
1270 | unsigned bytes; |
1271 | unsigned size; |
1272 | int err; |
1273 | u8 *buf; |
1274 | loff_t start; |
1275 | loff_t pos; |
1276 | }; |
1277 | |
1278 | struct i915_error_state_file_priv { |
1279 | struct drm_device *dev; |
1280 | struct drm_i915_error_state *error; |
1281 | }; |
1282 | |
1283 | struct i915_gpu_error { |
1284 | /* For hangcheck timer */ |
1285 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
1286 | #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
1287 | /* Hang gpu twice in this window and your context gets banned */ |
1288 | #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) |
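	/*
	 * Worked out: DIV_ROUND_UP(8 * 1500, 1000) == 12, i.e. a context that
	 * hangs the GPU twice within 12 seconds gets banned.
	 */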
1289 | |
1290 | struct timer_list hangcheck_timer; |
1291 | |
1292 | /* For reset and error_state handling. */ |
1293 | spinlock_t lock; |
1294 | /* Protected by the above dev->gpu_error.lock. */ |
1295 | struct drm_i915_error_state *first_error; |
1296 | struct work_struct work; |
1297 | |
1298 | |
1299 | unsigned long missed_irq_rings; |
1300 | |
1301 | /** |
1302 | * State variable controlling the reset flow and count |
1303 | * |
	 * This is a counter which gets incremented when a reset is triggered,
	 * and again when the reset has been handled. So odd values (lowest bit
	 * set) mean that a reset is in progress, and even values mean that the
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If a reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
1317 | * |
1318 | * This is important for lock-free wait paths, where no contended lock |
1319 | * naturally enforces the correct ordering between the bail-out of the |
1320 | * waiter and the gpu reset work code. |
1321 | */ |
1322 | atomic_t reset_counter; |
1323 | |
1324 | #define I915_RESET_IN_PROGRESS_FLAG 1 |
1325 | #define I915_WEDGED (1 << 31) |
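	/*
	 * Sketch of the intended decoding (illustrative, mirroring the
	 * comment above):
	 *
	 *	u32 v = atomic_read(&error->reset_counter);
	 *	bool in_progress = v & I915_RESET_IN_PROGRESS_FLAG;
	 *	bool wedged = v & I915_WEDGED;
	 *	u32 completed = v >> 1;	(valid while no reset is in progress)
	 */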
1326 | |
1327 | /** |
1328 | * Waitqueue to signal when the reset has completed. Used by clients |
1329 | * that wait for dev_priv->mm.wedged to settle. |
1330 | */ |
1331 | #ifdef __NetBSD__ |
1332 | spinlock_t reset_lock; |
1333 | drm_waitqueue_t reset_queue; |
1334 | #else |
1335 | wait_queue_head_t reset_queue; |
1336 | #endif |
1337 | |
1338 | /* For gpu hang simulation. */ |
1339 | unsigned int stop_rings; |
1340 | |
1341 | /* For missed irq/seqno simulation. */ |
1342 | unsigned int test_irq_rings; |
1343 | }; |
1344 | |
1345 | enum modeset_restore { |
1346 | MODESET_ON_LID_OPEN, |
1347 | MODESET_DONE, |
1348 | MODESET_SUSPENDED, |
1349 | }; |
1350 | |
1351 | struct ddi_vbt_port_info { |
1352 | uint8_t hdmi_level_shift; |
1353 | |
1354 | uint8_t supports_dvi:1; |
1355 | uint8_t supports_hdmi:1; |
1356 | uint8_t supports_dp:1; |
1357 | }; |
1358 | |
1359 | struct intel_vbt_data { |
1360 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
1361 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
1362 | |
1363 | /* Feature bits */ |
1364 | unsigned int int_tv_support:1; |
1365 | unsigned int lvds_dither:1; |
1366 | unsigned int lvds_vbt:1; |
1367 | unsigned int int_crt_support:1; |
1368 | unsigned int lvds_use_ssc:1; |
1369 | unsigned int display_clock_mode:1; |
1370 | unsigned int fdi_rx_polarity_inverted:1; |
1371 | int lvds_ssc_freq; |
1372 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
1373 | |
1374 | /* eDP */ |
1375 | int edp_rate; |
1376 | int edp_lanes; |
1377 | int edp_preemphasis; |
1378 | int edp_vswing; |
1379 | bool edp_initialized; |
1380 | bool edp_support; |
1381 | int edp_bpp; |
1382 | struct edp_power_seq edp_pps; |
1383 | |
1384 | struct { |
1385 | u16 pwm_freq_hz; |
1386 | bool present; |
1387 | bool active_low_pwm; |
1388 | } backlight; |
1389 | |
1390 | /* MIPI DSI */ |
1391 | struct { |
1392 | u16 panel_id; |
1393 | } dsi; |
1394 | |
1395 | int crt_ddc_pin; |
1396 | |
1397 | int child_dev_num; |
1398 | union child_device_config *child_dev; |
1399 | |
1400 | struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; |
1401 | }; |
1402 | |
1403 | enum intel_ddb_partitioning { |
1404 | INTEL_DDB_PART_1_2, |
1405 | INTEL_DDB_PART_5_6, /* IVB+ */ |
1406 | }; |
1407 | |
1408 | struct intel_wm_level { |
1409 | bool enable; |
1410 | uint32_t pri_val; |
1411 | uint32_t spr_val; |
1412 | uint32_t cur_val; |
1413 | uint32_t fbc_val; |
1414 | }; |
1415 | |
1416 | struct ilk_wm_values { |
1417 | uint32_t wm_pipe[3]; |
1418 | uint32_t wm_lp[3]; |
1419 | uint32_t wm_lp_spr[3]; |
1420 | uint32_t wm_linetime[3]; |
1421 | bool enable_fbc_wm; |
1422 | enum intel_ddb_partitioning partitioning; |
1423 | }; |
1424 | |
1425 | /* |
1426 | * This struct helps tracking the state needed for runtime PM, which puts the |
1427 | * device in PCI D3 state. Notice that when this happens, nothing on the |
 * graphics device works, not even register access, so we don't get interrupts
 * or anything else.
1430 | * |
1431 | * Every piece of our code that needs to actually touch the hardware needs to |
1432 | * either call intel_runtime_pm_get or call intel_display_power_get with the |
1433 | * appropriate power domain. |
1434 | * |
1435 | * Our driver uses the autosuspend delay feature, which means we'll only really |
1436 | * suspend if we stay with zero refcount for a certain amount of time. The |
1437 | * default value is currently very conservative (see intel_init_runtime_pm), but |
1438 | * it can be changed with the standard runtime PM files from sysfs. |
1439 | * |
1440 | * The irqs_disabled variable becomes true exactly after we disable the IRQs and |
1441 | * goes back to false exactly before we reenable the IRQs. We use this variable |
1442 | * to check if someone is trying to enable/disable IRQs while they're supposed |
1443 | * to be disabled. This shouldn't happen and we'll print some error messages in |
 * case it happens, but if it actually does we'll also update the variables
 * inside struct regsave so that, when we restore the IRQs, they will contain
 * the latest expected values.
 *
 * For more, see Documentation/power/runtime_pm.txt.
1449 | */ |
1450 | struct i915_runtime_pm { |
1451 | bool suspended; |
1452 | bool irqs_disabled; |
1453 | |
1454 | struct { |
1455 | uint32_t deimr; |
1456 | uint32_t sdeimr; |
1457 | uint32_t gtimr; |
1458 | uint32_t gtier; |
1459 | uint32_t gen6_pmimr; |
1460 | } regsave; |
1461 | }; |
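/*
 * Usage sketch (illustrative): code touching hardware registers brackets the
 * access with a runtime PM reference, along the lines of
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... register access ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * (intel_runtime_pm_put() is assumed here as the counterpart of the _get()
 * named in the comment above.)
 */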
1462 | |
1463 | enum intel_pipe_crc_source { |
1464 | INTEL_PIPE_CRC_SOURCE_NONE, |
1465 | INTEL_PIPE_CRC_SOURCE_PLANE1, |
1466 | INTEL_PIPE_CRC_SOURCE_PLANE2, |
1467 | INTEL_PIPE_CRC_SOURCE_PF, |
1468 | INTEL_PIPE_CRC_SOURCE_PIPE, |
1469 | /* TV/DP on pre-gen5/vlv can't use the pipe source. */ |
1470 | INTEL_PIPE_CRC_SOURCE_TV, |
1471 | INTEL_PIPE_CRC_SOURCE_DP_B, |
1472 | INTEL_PIPE_CRC_SOURCE_DP_C, |
1473 | INTEL_PIPE_CRC_SOURCE_DP_D, |
1474 | INTEL_PIPE_CRC_SOURCE_AUTO, |
1475 | INTEL_PIPE_CRC_SOURCE_MAX, |
1476 | }; |
1477 | |
1478 | struct intel_pipe_crc_entry { |
1479 | uint32_t frame; |
1480 | uint32_t crc[5]; |
1481 | }; |
1482 | |
1483 | #define INTEL_PIPE_CRC_ENTRIES_NR 128 |
1484 | struct intel_pipe_crc { |
1485 | spinlock_t lock; |
1486 | bool opened; /* exclusive access to the result file */ |
1487 | struct intel_pipe_crc_entry *entries; |
1488 | enum intel_pipe_crc_source source; |
1489 | int head, tail; |
1490 | #ifdef __NetBSD__ |
1491 | drm_waitqueue_t wq; |
1492 | #else |
1493 | wait_queue_head_t wq; |
1494 | #endif |
1495 | }; |
1496 | |
1497 | #ifdef __NetBSD__ |
1498 | # define __i915_iomem |
1499 | # define __iomem __i915_iomem |
1500 | #endif |
1501 | |
1502 | typedef struct drm_i915_private { |
1503 | struct drm_device *dev; |
1504 | struct kmem_cache *slab; |
1505 | |
1506 | struct intel_device_info info; |
1507 | |
1508 | int relative_constants_mode; |
1509 | |
1510 | #ifdef __NetBSD__ |
1511 | bus_space_tag_t regs_bst; |
1512 | bus_space_handle_t regs_bsh; |
1513 | #endif |
1514 | void __iomem *regs; |
1515 | |
1516 | struct intel_uncore uncore; |
1517 | |
1518 | struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
1519 | |
1520 | |
1521 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus |
1522 | * controller on different i2c buses. */ |
1523 | struct mutex gmbus_mutex; |
1524 | |
1525 | /** |
1526 | * Base address of the gmbus and gpio block. |
1527 | */ |
1528 | uint32_t gpio_mmio_base; |
1529 | |
1530 | #ifdef __NetBSD__ |
1531 | spinlock_t gmbus_wait_lock; |
1532 | drm_waitqueue_t gmbus_wait_queue; |
1533 | #else |
1534 | wait_queue_head_t gmbus_wait_queue; |
1535 | #endif |
1536 | |
1537 | struct pci_dev *bridge_dev; |
1538 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
1539 | uint32_t last_seqno, next_seqno; |
1540 | |
1541 | drm_dma_handle_t *status_page_dmah; |
1542 | struct resource mch_res; |
1543 | |
1544 | /* protects the irq masks */ |
1545 | spinlock_t irq_lock; |
1546 | |
1547 | bool display_irqs_enabled; |
1548 | |
1549 | /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
1550 | struct pm_qos_request pm_qos; |
1551 | |
1552 | /* DPIO indirect register protection */ |
1553 | struct mutex dpio_lock; |
1554 | |
1555 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
1556 | union { |
1557 | u32 irq_mask; |
1558 | u32 de_irq_mask[I915_MAX_PIPES]; |
1559 | }; |
1560 | u32 gt_irq_mask; |
1561 | u32 pm_irq_mask; |
1562 | u32 pm_rps_events; |
1563 | u32 pipestat_irq_mask[I915_MAX_PIPES]; |
1564 | |
1565 | struct work_struct hotplug_work; |
1566 | bool enable_hotplug_processing; |
1567 | struct { |
1568 | unsigned long hpd_last_jiffies; |
1569 | int hpd_cnt; |
1570 | enum { |
1571 | HPD_ENABLED = 0, |
1572 | HPD_DISABLED = 1, |
1573 | HPD_MARK_DISABLED = 2 |
1574 | } hpd_mark; |
1575 | } hpd_stats[HPD_NUM_PINS]; |
1576 | u32 hpd_event_bits; |
1577 | struct timer_list hotplug_reenable_timer; |
1578 | |
1579 | struct i915_fbc fbc; |
1580 | struct intel_opregion opregion; |
1581 | struct intel_vbt_data vbt; |
1582 | |
1583 | /* overlay */ |
1584 | struct intel_overlay *overlay; |
1585 | |
1586 | /* backlight registers and fields in struct intel_panel */ |
1587 | spinlock_t backlight_lock; |
1588 | |
1589 | /* LVDS info */ |
1590 | bool no_aux_handshake; |
1591 | |
1592 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
1593 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
1594 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
1595 | |
1596 | unsigned int fsb_freq, mem_freq, is_ddr3; |
1597 | |
1598 | /** |
1599 | * wq - Driver workqueue for GEM. |
1600 | * |
1601 | * NOTE: Work items scheduled here are not allowed to grab any modeset |
1602 | * locks, for otherwise the flushing done in the pageflip code will |
1603 | * result in deadlocks. |
1604 | */ |
1605 | struct workqueue_struct *wq; |
1606 | |
1607 | /* Display functions */ |
1608 | struct drm_i915_display_funcs display; |
1609 | |
1610 | /* PCH chipset type */ |
1611 | enum intel_pch pch_type; |
1612 | unsigned short pch_id; |
1613 | |
1614 | unsigned long quirks; |
1615 | |
1616 | enum modeset_restore modeset_restore; |
1617 | struct mutex modeset_restore_lock; |
1618 | |
1619 | struct list_head vm_list; /* Global list of all address spaces */ |
1620 | struct i915_gtt gtt; /* VMA representing the global address space */ |
1621 | |
1622 | struct i915_gem_mm mm; |
1623 | |
1624 | /* Kernel Modesetting */ |
1625 | |
1626 | struct sdvo_device_mapping sdvo_mappings[2]; |
1627 | |
1628 | struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; |
1629 | struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; |
1630 | #ifdef __NetBSD__ |
	/* XXX The locking scheme looks broken. This lock is a stop-gap. */
1632 | struct spinlock pending_flip_lock; |
1633 | drm_waitqueue_t pending_flip_queue; |
1634 | #else |
1635 | wait_queue_head_t pending_flip_queue; |
1636 | #endif |
1637 | |
1638 | #ifdef CONFIG_DEBUG_FS |
1639 | struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; |
1640 | #endif |
1641 | |
1642 | int num_shared_dpll; |
1643 | struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; |
1644 | struct intel_ddi_plls ddi_plls; |
1645 | int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; |
1646 | |
1647 | /* Reclocking support */ |
1648 | bool render_reclock_avail; |
1649 | bool lvds_downclock_avail; |
	/* indicates the reduced downclock for LVDS */
1651 | int lvds_downclock; |
1652 | u16 orig_clock; |
1653 | |
1654 | bool mchbar_need_disable; |
1655 | |
1656 | struct intel_l3_parity l3_parity; |
1657 | |
1658 | /* Cannot be determined by PCIID. You must always read a register. */ |
1659 | size_t ellc_size; |
1660 | |
1661 | /* gen6+ rps state */ |
1662 | struct intel_gen6_power_mgmt rps; |
1663 | |
1664 | /* ilk-only ips/rps state. Everything in here is protected by the global |
1665 | * mchdev_lock in intel_pm.c */ |
1666 | struct intel_ilk_power_mgmt ips; |
1667 | |
1668 | struct i915_power_domains power_domains; |
1669 | |
1670 | struct i915_psr psr; |
1671 | |
1672 | struct i915_gpu_error gpu_error; |
1673 | |
1674 | struct drm_i915_gem_object *vlv_pctx; |
1675 | |
1676 | #ifdef CONFIG_DRM_I915_FBDEV |
	/* the fbdev registered on this device */
1678 | struct intel_fbdev *fbdev; |
1679 | #endif |
1680 | |
1681 | /* |
1682 | * The console may be contended at resume, but we don't |
	 * want resume to block on it.
1684 | */ |
1685 | struct work_struct console_resume_work; |
1686 | |
1687 | struct drm_property *broadcast_rgb_property; |
1688 | struct drm_property *force_audio_property; |
1689 | |
1690 | uint32_t hw_context_size; |
1691 | struct list_head context_list; |
1692 | |
1693 | u32 fdi_rx_config; |
1694 | |
1695 | u32 suspend_count; |
1696 | struct i915_suspend_saved_registers regfile; |
1697 | |
1698 | struct { |
1699 | /* |
1700 | * Raw watermark latency values: |
1701 | * in 0.1us units for WM0, |
1702 | * in 0.5us units for WM1+. |
1703 | */ |
1704 | /* primary */ |
1705 | uint16_t pri_latency[5]; |
1706 | /* sprite */ |
1707 | uint16_t spr_latency[5]; |
1708 | /* cursor */ |
1709 | uint16_t cur_latency[5]; |
1710 | |
1711 | /* current hardware state */ |
1712 | struct ilk_wm_values hw; |
1713 | } wm; |
1714 | |
1715 | struct i915_runtime_pm pm; |
1716 | |
1717 | /* Old dri1 support infrastructure, beware the dragons ya fools entering |
1718 | * here! */ |
1719 | struct i915_dri1_state dri1; |
1720 | /* Old ums support infrastructure, same warning applies. */ |
1721 | struct i915_ums_state ums; |
1722 | } drm_i915_private_t; |
1723 | |
1724 | #ifdef __NetBSD__ |
1725 | # undef __iomem |
1726 | # undef __i915_iomem |
1727 | #endif |
1728 | |
1729 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) |
1730 | { |
1731 | return dev->dev_private; |
1732 | } |
1733 | |
1734 | /* Iterate over initialised rings */ |
1735 | #define for_each_ring(ring__, dev_priv__, i__) \ |
1736 | for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ |
1737 | if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) |
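
/*
 * Example usage (an illustrative sketch; dev_priv is obtained from the
 * drm_device as elsewhere in this file):
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d initialised\n", i);
 *
 * The trailing if in the macro skips rings that were never initialised
 * on this platform, so the loop body only ever sees live rings.
 */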
1738 | |
1739 | enum hdmi_force_audio { |
1740 | HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ |
1741 | HDMI_AUDIO_OFF, /* force turn off HDMI audio */ |
1742 | HDMI_AUDIO_AUTO, /* trust EDID */ |
1743 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ |
1744 | }; |
1745 | |
1746 | #define I915_GTT_OFFSET_NONE ((u32)-1) |
1747 | |
1748 | struct drm_i915_gem_object_ops { |
1749 | /* Interface between the GEM object and its backing storage. |
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
1754 | * and making them available for the GPU (e.g. clflush), we may hold |
1755 | * onto the pages after they are no longer referenced by the GPU |
1756 | * in case they may be used again shortly (for example migrating the |
1757 | * pages to a different memory domain within the GTT). put_pages() |
1758 | * will therefore most likely be called when the object itself is |
1759 | * being released or under memory pressure (where we attempt to |
1760 | * reap pages for the shrinker). |
1761 | */ |
1762 | int (*get_pages)(struct drm_i915_gem_object *); |
1763 | void (*put_pages)(struct drm_i915_gem_object *); |
1764 | }; |
1765 | |
1766 | struct drm_i915_gem_object { |
1767 | struct drm_gem_object base; |
1768 | |
1769 | const struct drm_i915_gem_object_ops *ops; |
1770 | |
1771 | /** List of VMAs backed by this object */ |
1772 | struct list_head vma_list; |
1773 | |
1774 | /** Stolen memory for this object, instead of being backed by shmem. */ |
1775 | struct drm_mm_node *stolen; |
1776 | struct list_head global_list; |
1777 | |
1778 | struct list_head ring_list; |
1779 | /** Used in execbuf to temporarily hold a ref */ |
1780 | struct list_head obj_exec_link; |
1781 | |
1782 | /** |
1783 | * This is set if the object is on the active lists (has pending |
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
1786 | */ |
1787 | unsigned int active:1; |
1788 | |
1789 | /** |
1790 | * This is set if the object has been written to since last bound |
1791 | * to the GTT |
1792 | */ |
1793 | unsigned int dirty:1; |
1794 | |
1795 | /** |
1796 | * Fence register bits (if any) for this object. Will be set |
1797 | * as needed when mapped into the GTT. |
1798 | * Protected by dev->struct_mutex. |
1799 | */ |
1800 | signed int fence_reg:I915_MAX_NUM_FENCE_BITS; |
1801 | |
1802 | /** |
1803 | * Advice: are the backing pages purgeable? |
1804 | */ |
1805 | unsigned int madv:2; |
1806 | |
1807 | /** |
1808 | * Current tiling mode for the object. |
1809 | */ |
1810 | unsigned int tiling_mode:2; |
1811 | /** |
1812 | * Whether the tiling parameters for the currently associated fence |
1813 | * register have changed. Note that for the purposes of tracking |
1814 | * tiling changes we also treat the unfenced register, the register |
1815 | * slot that the object occupies whilst it executes a fenced |
1816 | * command (such as BLT on gen2/3), as a "fence". |
1817 | */ |
1818 | unsigned int fence_dirty:1; |
1819 | |
1820 | /** |
1821 | * Is the object at the current location in the gtt mappable and |
1822 | * fenceable? Used to avoid costly recalculations. |
1823 | */ |
1824 | unsigned int map_and_fenceable:1; |
1825 | |
1826 | /** |
1827 | * Whether the current gtt mapping needs to be mappable (and isn't just |
	 * mappable by accident). Track pin and fault separately for a more
1829 | * accurate mappable working set. |
1830 | */ |
1831 | unsigned int fault_mappable:1; |
1832 | unsigned int pin_mappable:1; |
1833 | unsigned int pin_display:1; |
1834 | |
1835 | /* |
	 * Is the GPU currently using a fence to access this buffer?
1837 | */ |
1838 | unsigned int pending_fenced_gpu_access:1; |
1839 | unsigned int fenced_gpu_access:1; |
1840 | |
1841 | unsigned int cache_level:3; |
1842 | |
1843 | unsigned int has_aliasing_ppgtt_mapping:1; |
1844 | unsigned int has_global_gtt_mapping:1; |
1845 | unsigned int has_dma_mapping:1; |
1846 | |
1847 | #ifdef __NetBSD__ |
1848 | struct pglist igo_pageq; |
1849 | bus_dma_segment_t *pages; /* `pages' is an expedient misnomer. */ |
1850 | int igo_nsegs; |
1851 | bus_dmamap_t igo_dmamap; |
1852 | #else |
1853 | struct sg_table *pages; |
1854 | #endif |
1855 | int pages_pin_count; |
1856 | |
1857 | /* prime dma-buf support */ |
1858 | void *dma_buf_vmapping; |
1859 | int vmapping_count; |
1860 | |
1861 | struct intel_ring_buffer *ring; |
1862 | |
1863 | /** Breadcrumb of last rendering to the buffer. */ |
1864 | uint32_t last_read_seqno; |
1865 | uint32_t last_write_seqno; |
1866 | /** Breadcrumb of last fenced GPU access to the buffer. */ |
1867 | uint32_t last_fenced_seqno; |
1868 | |
1869 | /** Current tiling stride for the object, if it's tiled. */ |
1870 | uint32_t stride; |
1871 | |
1872 | /** References from framebuffers, locks out tiling changes. */ |
1873 | unsigned long framebuffer_references; |
1874 | |
1875 | /** Record of address bit 17 of each page at last unbind. */ |
1876 | unsigned long *bit_17; |
1877 | |
1878 | /** User space pin count and filp owning the pin */ |
1879 | unsigned long user_pin_count; |
1880 | struct drm_file *pin_filp; |
1881 | |
1882 | /** for phy allocated objects */ |
1883 | drm_dma_handle_t *phys_handle; |
1884 | }; |
1885 | |
1886 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
1887 | |
1888 | /** |
1889 | * Request queue structure. |
1890 | * |
1891 | * The request queue allows us to note sequence numbers that have been emitted |
1892 | * and may be associated with active buffers to be retired. |
1893 | * |
1894 | * By keeping this list, we can avoid having to do questionable |
1895 | * sequence-number comparisons on buffer last_rendering_seqnos, and associate |
1896 | * an emission time with seqnos for tracking how far ahead of the GPU we are. |
1897 | */ |
1898 | struct drm_i915_gem_request { |
	/** On which ring this request was generated */
1900 | struct intel_ring_buffer *ring; |
1901 | |
1902 | /** GEM sequence number associated with this request. */ |
1903 | uint32_t seqno; |
1904 | |
1905 | /** Position in the ringbuffer of the start of the request */ |
1906 | u32 head; |
1907 | |
1908 | /** Position in the ringbuffer of the end of the request */ |
1909 | u32 tail; |
1910 | |
1911 | /** Context related to this request */ |
1912 | struct i915_hw_context *ctx; |
1913 | |
1914 | /** Batch buffer related to this request if any */ |
1915 | struct drm_i915_gem_object *batch_obj; |
1916 | |
1917 | /** Time at which this request was emitted, in jiffies. */ |
1918 | unsigned long emitted_jiffies; |
1919 | |
1920 | /** global list entry for this request */ |
1921 | struct list_head list; |
1922 | |
1923 | struct drm_i915_file_private *file_priv; |
1924 | /** file_priv list entry for this request */ |
1925 | struct list_head client_list; |
1926 | }; |
1927 | |
1928 | struct drm_i915_file_private { |
1929 | struct drm_i915_private *dev_priv; |
1930 | struct drm_file *file; |
1931 | |
1932 | struct { |
1933 | spinlock_t lock; |
1934 | struct list_head request_list; |
1935 | struct delayed_work idle_work; |
1936 | } mm; |
1937 | struct idr context_idr; |
1938 | |
1939 | struct i915_hw_context *private_default_ctx; |
1940 | atomic_t rps_wait_boost; |
1941 | }; |
1942 | |
1943 | /* |
1944 | * A command that requires special handling by the command parser. |
1945 | */ |
1946 | struct drm_i915_cmd_descriptor { |
1947 | /* |
1948 | * Flags describing how the command parser processes the command. |
1949 | * |
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 otherwise its length is derived from a length mask
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_BITMASK: Selected dwords of the command are masked and
	 *                   compared against expected values (see bits[] below)
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
1960 | */ |
1961 | u32 flags; |
1962 | #define CMD_DESC_FIXED (1<<0) |
1963 | #define CMD_DESC_SKIP (1<<1) |
1964 | #define CMD_DESC_REJECT (1<<2) |
1965 | #define CMD_DESC_REGISTER (1<<3) |
1966 | #define CMD_DESC_BITMASK (1<<4) |
1967 | #define CMD_DESC_MASTER (1<<5) |
1968 | |
1969 | /* |
1970 | * The command's unique identification bits and the bitmask to get them. |
1971 | * This isn't strictly the opcode field as defined in the spec and may |
1972 | * also include type, subtype, and/or subop fields. |
1973 | */ |
1974 | struct { |
1975 | u32 value; |
1976 | u32 mask; |
1977 | } cmd; |
1978 | |
1979 | /* |
1980 | * The command's length. The command is either fixed length (i.e. does |
1981 | * not include a length field) or has a length field mask. The flag |
1982 | * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has |
1983 | * a length mask. All command entries in a command table must include |
1984 | * length information. |
1985 | */ |
1986 | union { |
1987 | u32 fixed; |
1988 | u32 mask; |
1989 | } length; |
1990 | |
1991 | /* |
1992 | * Describes where to find a register address in the command to check |
1993 | * against the ring's register whitelist. Only valid if flags has the |
1994 | * CMD_DESC_REGISTER bit set. |
1995 | */ |
1996 | struct { |
1997 | u32 offset; |
1998 | u32 mask; |
1999 | } reg; |
2000 | |
2001 | #define MAX_CMD_DESC_BITMASKS 3 |
2002 | /* |
2003 | * Describes command checks where a particular dword is masked and |
2004 | * compared against an expected value. If the command does not match |
2005 | * the expected value, the parser rejects it. Only valid if flags has |
2006 | * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero |
2007 | * are valid. |
2008 | */ |
2009 | struct { |
2010 | u32 offset; |
2011 | u32 mask; |
2012 | u32 expected; |
2013 | } bits[MAX_CMD_DESC_BITMASKS]; |
2014 | }; |
2015 | |
2016 | /* |
2017 | * A table of commands requiring special handling by the command parser. |
2018 | * |
2019 | * Each ring has an array of tables. Each table consists of an array of command |
2020 | * descriptors, which must be sorted with command opcodes in ascending order. |
2021 | */ |
2022 | struct drm_i915_cmd_table { |
2023 | const struct drm_i915_cmd_descriptor *table; |
2024 | int count; |
2025 | }; |
2026 | |
2027 | #define INTEL_INFO(dev) (&to_i915(dev)->info) |
2028 | |
2029 | #define IS_I830(dev) ((dev)->pdev->device == 0x3577) |
2030 | #define IS_845G(dev) ((dev)->pdev->device == 0x2562) |
2031 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
2032 | #define IS_I865G(dev) ((dev)->pdev->device == 0x2572) |
2033 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
2034 | #define IS_I915GM(dev) ((dev)->pdev->device == 0x2592) |
2035 | #define IS_I945G(dev) ((dev)->pdev->device == 0x2772) |
2036 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
2037 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) |
2038 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) |
2039 | #define IS_GM45(dev) ((dev)->pdev->device == 0x2A42) |
2040 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
2041 | #define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001) |
2042 | #define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011) |
2043 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
2044 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
2045 | #define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046) |
2046 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
2047 | #define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \ |
2048 | (dev)->pdev->device == 0x0152 || \ |
2049 | (dev)->pdev->device == 0x015a) |
2050 | #define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \ |
2051 | (dev)->pdev->device == 0x0106 || \ |
2052 | (dev)->pdev->device == 0x010A) |
2053 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
2054 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
2055 | #define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8) |
2056 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
2057 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
2058 | ((dev)->pdev->device & 0xFF00) == 0x0C00) |
2059 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
2060 | (((dev)->pdev->device & 0xf) == 0x2 || \ |
2061 | ((dev)->pdev->device & 0xf) == 0x6 || \ |
2062 | ((dev)->pdev->device & 0xf) == 0xe)) |
2063 | #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ |
2064 | ((dev)->pdev->device & 0xFF00) == 0x0A00) |
2065 | #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) |
2066 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
2067 | ((dev)->pdev->device & 0x00F0) == 0x0020) |
2068 | /* ULX machines are also considered ULT. */ |
2069 | #define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ |
2070 | (dev)->pdev->device == 0x0A1E) |
2071 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
2072 | |
2073 | /* |
2074 | * The genX designation typically refers to the render engine, so render |
2075 | * capability related checks should use IS_GEN, while display and other checks |
2076 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular |
2077 | * chips, etc.). |
2078 | */ |
2079 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) |
2080 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) |
2081 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) |
2082 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) |
2083 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
2084 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) |
2085 | #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) |
2086 | |
2087 | #define RENDER_RING (1<<RCS) |
2088 | #define BSD_RING (1<<VCS) |
2089 | #define BLT_RING (1<<BCS) |
2090 | #define VEBOX_RING (1<<VECS) |
2091 | #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) |
2092 | #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) |
2093 | #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) |
2094 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
2095 | #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) |
2096 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
2097 | |
2098 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
2099 | #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) |
2100 | #define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \ |
2101 | && !IS_BROADWELL(dev)) |
2102 | #define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) |
2103 | #define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) |
2104 | |
2105 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
2106 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
2107 | |
2108 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
2109 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) |
2110 | /* |
 * The DP AUX and GMBUS IRQs on gen4 seem to be able to generate legacy
 * interrupts even when in MSI mode. This results in spurious interrupt
 * warnings if the legacy IRQ number is shared with another device. The
 * kernel then disables that interrupt source and so prevents the other
 * device from working properly.
2115 | */ |
2116 | #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) |
2117 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) |
2118 | |
2119 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
2120 | * rows, which changed the alignment requirements and fence programming. |
2121 | */ |
2122 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ |
2123 | IS_I915GM(dev))) |
2124 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) |
2125 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
2126 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
2127 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) |
2128 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
2129 | |
2130 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) |
2131 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
2132 | #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
2133 | |
2134 | #define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) |
2135 | |
2136 | #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) |
2137 | #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) |
2138 | #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
2139 | #define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ |
2140 | #define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev)) |
2141 | |
2142 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
2143 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
2144 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
2145 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
2146 | #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 |
2147 | #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 |
2148 | |
2149 | #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) |
2150 | #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
2151 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
2152 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
2153 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) |
2154 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) |
2155 | |
2156 | /* DPF == dynamic parity feature */ |
2157 | #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
2158 | #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) |
2159 | |
2160 | #define GT_FREQUENCY_MULTIPLIER 50 |
2161 | |
2162 | #include "i915_trace.h" |
2163 | |
2164 | extern const struct drm_ioctl_desc i915_ioctls[]; |
2165 | extern int i915_max_ioctl; |
2166 | |
2167 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
2168 | extern int i915_resume(struct drm_device *dev); |
2169 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
2170 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
2171 | |
2172 | extern int i915_drm_freeze(struct drm_device *dev); |
2173 | extern int i915_drm_thaw_early(struct drm_device *dev); |
2174 | extern int i915_drm_thaw(struct drm_device *dev); |
2175 | |
2176 | /* i915_params.c */ |
2177 | struct i915_params { |
2178 | int modeset; |
2179 | int panel_ignore_lid; |
2180 | unsigned int powersave; |
2181 | int semaphores; |
2182 | unsigned int lvds_downclock; |
2183 | int lvds_channel_mode; |
2184 | int panel_use_ssc; |
2185 | int vbt_sdvo_panel_type; |
2186 | int enable_rc6; |
2187 | int enable_fbc; |
2188 | int enable_ppgtt; |
2189 | int enable_psr; |
2190 | unsigned int preliminary_hw_support; |
2191 | int disable_power_well; |
2192 | int enable_ips; |
2193 | int invert_brightness; |
2194 | int enable_cmd_parser; |
2195 | /* leave bools at the end to not create holes */ |
2196 | bool enable_hangcheck; |
2197 | bool fastboot; |
2198 | bool prefault_disable; |
2199 | bool reset; |
2200 | bool disable_display; |
2201 | }; |
2202 | extern struct i915_params i915 __read_mostly; |
2203 | |
2204 | /* i915_dma.c */ |
2205 | void i915_update_dri1_breadcrumb(struct drm_device *dev); |
extern void i915_kernel_lost_context(struct drm_device *dev);
2207 | extern int i915_driver_load(struct drm_device *, unsigned long flags); |
2208 | extern int i915_driver_unload(struct drm_device *); |
2209 | extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); |
extern void i915_driver_lastclose(struct drm_device *dev);
2211 | extern void i915_driver_preclose(struct drm_device *dev, |
2212 | struct drm_file *file_priv); |
2213 | extern void i915_driver_postclose(struct drm_device *dev, |
2214 | struct drm_file *file_priv); |
extern int i915_driver_device_is_agp(struct drm_device *dev);
2216 | #ifdef CONFIG_COMPAT |
2217 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
2218 | unsigned long arg); |
2219 | #endif |
2220 | extern int i915_emit_box(struct drm_device *dev, |
2221 | struct drm_clip_rect *box, |
2222 | int DR1, int DR4); |
2223 | extern int intel_gpu_reset(struct drm_device *dev); |
2224 | extern int i915_reset(struct drm_device *dev); |
2225 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
2226 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
2227 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
2228 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
2229 | |
2230 | extern void intel_console_resume(struct work_struct *work); |
2231 | |
2232 | /* i915_irq.c */ |
2233 | void i915_queue_hangcheck(struct drm_device *dev); |
2234 | __printf(3, 4) |
2235 | void i915_handle_error(struct drm_device *dev, bool wedged, |
2236 | const char *fmt, ...); |
2237 | |
2238 | void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir, |
2239 | int new_delay); |
2240 | extern void intel_irq_init(struct drm_device *dev); |
2241 | extern void intel_hpd_init(struct drm_device *dev); |
2242 | |
2243 | extern void intel_uncore_sanitize(struct drm_device *dev); |
2244 | extern void intel_uncore_early_sanitize(struct drm_device *dev); |
2245 | extern void intel_uncore_init(struct drm_device *dev); |
2246 | extern void intel_uncore_check_errors(struct drm_device *dev); |
2247 | extern void intel_uncore_fini(struct drm_device *dev); |
2248 | extern void intel_uncore_destroy(struct drm_device *dev); |
2249 | |
2250 | void |
2251 | i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, |
2252 | u32 status_mask); |
2253 | |
2254 | void |
2255 | i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, |
2256 | u32 status_mask); |
2257 | |
2258 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); |
2259 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); |
2260 | |
2261 | /* i915_gem.c */ |
2262 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
2263 | struct drm_file *file_priv); |
2264 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
2265 | struct drm_file *file_priv); |
2266 | int i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
2267 | struct drm_file *file_priv); |
2268 | int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
2269 | struct drm_file *file_priv); |
2270 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
2271 | struct drm_file *file_priv); |
2272 | int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
2273 | struct drm_file *file_priv); |
2274 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
2275 | struct drm_file *file_priv); |
2276 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
2277 | struct drm_file *file_priv); |
2278 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
2279 | struct drm_file *file_priv); |
2280 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
2281 | struct drm_file *file_priv); |
2282 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
2283 | struct drm_file *file_priv); |
2284 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
2285 | struct drm_file *file_priv); |
2286 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
2287 | struct drm_file *file_priv); |
2288 | int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
2289 | struct drm_file *file); |
2290 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
2291 | struct drm_file *file); |
2292 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
2293 | struct drm_file *file_priv); |
2294 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
2295 | struct drm_file *file_priv); |
2296 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
2297 | struct drm_file *file_priv); |
2298 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
2299 | struct drm_file *file_priv); |
2300 | int i915_gem_set_tiling(struct drm_device *dev, void *data, |
2301 | struct drm_file *file_priv); |
2302 | int i915_gem_get_tiling(struct drm_device *dev, void *data, |
2303 | struct drm_file *file_priv); |
2304 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
2305 | struct drm_file *file_priv); |
2306 | int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
2307 | struct drm_file *file_priv); |
2308 | void i915_gem_load(struct drm_device *dev); |
2309 | void *i915_gem_object_alloc(struct drm_device *dev); |
2310 | void i915_gem_object_free(struct drm_i915_gem_object *obj); |
2311 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
2312 | const struct drm_i915_gem_object_ops *ops); |
2313 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
2314 | size_t size); |
2315 | void i915_init_vm(struct drm_i915_private *dev_priv, |
2316 | struct i915_address_space *vm); |
2317 | void i915_gem_free_object(struct drm_gem_object *obj); |
2318 | void i915_gem_vma_destroy(struct i915_vma *vma); |
2319 | |
2320 | #define PIN_MAPPABLE 0x1 |
2321 | #define PIN_NONBLOCK 0x2 |
2322 | #define PIN_GLOBAL 0x4 |
2323 | #define PIN_OFFSET_BIAS 0x8 |
2324 | #define PIN_OFFSET_MASK (~4095) |
2325 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
2326 | struct i915_address_space *vm, |
2327 | uint32_t alignment, |
2328 | uint64_t flags); |
2329 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
2330 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
2331 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); |
2332 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
2333 | void i915_gem_lastclose(struct drm_device *dev); |
2334 | |
2335 | int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, |
2336 | int *needs_clflush); |
2337 | |
2338 | int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); |
2339 | #ifdef __NetBSD__ /* XXX */ |
2340 | static inline struct page * |
2341 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
2342 | { |
2343 | |
2344 | /* |
2345 | * Pages must be pinned so that we need not hold the lock to |
2346 | * prevent them from disappearing. |
2347 | */ |
2348 | KASSERT(obj->pages != NULL); |
2349 | mutex_enter(obj->base.gemo_shm_uao->vmobjlock); |
2350 | struct vm_page *const page = uvm_pagelookup(obj->base.gemo_shm_uao, |
2351 | ptoa(n)); |
2352 | mutex_exit(obj->base.gemo_shm_uao->vmobjlock); |
2353 | |
2354 | return container_of(page, struct page, p_vmp); |
2355 | } |
2356 | #else |
2357 | static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
2358 | { |
2359 | struct sg_page_iter sg_iter; |
2360 | |
2361 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n) |
2362 | return sg_page_iter_page(&sg_iter); |
2363 | |
2364 | return NULL; |
2365 | } |
2366 | #endif |
2367 | static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) |
2368 | { |
2369 | BUG_ON(obj->pages == NULL); |
2370 | obj->pages_pin_count++; |
2371 | } |
2372 | static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) |
2373 | { |
2374 | BUG_ON(obj->pages_pin_count == 0); |
2375 | obj->pages_pin_count--; |
2376 | } |
2377 | |
2378 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
2379 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
2380 | struct intel_ring_buffer *to); |
2381 | void i915_vma_move_to_active(struct i915_vma *vma, |
2382 | struct intel_ring_buffer *ring); |
2383 | int i915_gem_dumb_create(struct drm_file *file_priv, |
2384 | struct drm_device *dev, |
2385 | struct drm_mode_create_dumb *args); |
2386 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, |
2387 | uint32_t handle, uint64_t *offset); |
2388 | /** |
 * Returns true if seq1 is later than or equal to seq2.
2390 | */ |
2391 | static inline bool |
2392 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) |
2393 | { |
2394 | return (int32_t)(seq1 - seq2) >= 0; |
2395 | } |
2396 | |
2397 | int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
2398 | int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); |
2399 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
2400 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
2401 | |
2402 | static inline bool |
2403 | i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) |
2404 | { |
2405 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
2406 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2407 | dev_priv->fence_regs[obj->fence_reg].pin_count++; |
2408 | return true; |
2409 | } else |
2410 | return false; |
2411 | } |
2412 | |
2413 | static inline void |
2414 | i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) |
2415 | { |
2416 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
2417 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2418 | WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); |
2419 | dev_priv->fence_regs[obj->fence_reg].pin_count--; |
2420 | } |
2421 | } |
2422 | |
2423 | struct drm_i915_gem_request * |
2424 | i915_gem_find_active_request(struct intel_ring_buffer *ring); |
2425 | |
2426 | bool i915_gem_retire_requests(struct drm_device *dev); |
2427 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
2428 | bool interruptible); |
2429 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
2430 | { |
2431 | return unlikely(atomic_read(&error->reset_counter) |
2432 | & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); |
2433 | } |
2434 | |
2435 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
2436 | { |
2437 | return atomic_read(&error->reset_counter) & I915_WEDGED; |
2438 | } |
2439 | |
2440 | static inline u32 i915_reset_count(struct i915_gpu_error *error) |
2441 | { |
2442 | return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; |
2443 | } |
2444 | |
2445 | void i915_gem_reset(struct drm_device *dev); |
2446 | bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); |
2447 | int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
2448 | int __must_check i915_gem_init(struct drm_device *dev); |
2449 | int __must_check i915_gem_init_hw(struct drm_device *dev); |
2450 | int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice); |
2451 | void i915_gem_init_swizzling(struct drm_device *dev); |
2452 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
2453 | int __must_check i915_gpu_idle(struct drm_device *dev); |
2454 | int __must_check i915_gem_suspend(struct drm_device *dev); |
2455 | int __i915_add_request(struct intel_ring_buffer *ring, |
2456 | struct drm_file *file, |
2457 | struct drm_i915_gem_object *batch_obj, |
2458 | u32 *seqno); |
2459 | #define i915_add_request(ring, seqno) \ |
2460 | __i915_add_request(ring, NULL, NULL, seqno) |
2461 | int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, |
2462 | uint32_t seqno); |
2463 | #ifdef __NetBSD__ /* XXX */ |
2464 | int i915_gem_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **, |
2465 | int, int, vm_prot_t, int); |
2466 | #else |
2467 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
2468 | #endif |
2469 | int __must_check |
2470 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
2471 | bool write); |
2472 | int __must_check |
2473 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); |
2474 | int __must_check |
2475 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
2476 | u32 alignment, |
2477 | struct intel_ring_buffer *pipelined); |
2478 | void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); |
2479 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, |
2480 | int align); |
2481 | int i915_gem_open(struct drm_device *dev, struct drm_file *file); |
2482 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
2483 | |
2484 | uint32_t |
2485 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); |
2486 | uint32_t |
2487 | i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
2488 | int tiling_mode, bool fenced); |
2489 | |
2490 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
2491 | enum i915_cache_level cache_level); |
2492 | |
2493 | struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, |
2494 | struct dma_buf *dma_buf); |
2495 | |
2496 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, |
2497 | struct drm_gem_object *gem_obj, int flags); |
2498 | |
2499 | void i915_gem_restore_fences(struct drm_device *dev); |
2500 | |
2501 | unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, |
2502 | struct i915_address_space *vm); |
2503 | bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); |
2504 | bool i915_gem_obj_bound(struct drm_i915_gem_object *o, |
2505 | struct i915_address_space *vm); |
2506 | unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, |
2507 | struct i915_address_space *vm); |
2508 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
2509 | struct i915_address_space *vm); |
2510 | struct i915_vma * |
2511 | i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
2512 | struct i915_address_space *vm); |
2513 | |
2514 | struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); |
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
2516 | struct i915_vma *vma; |
2517 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
2518 | if (vma->pin_count > 0) |
2519 | return true; |
2520 | return false; |
2521 | } |
2522 | |
2523 | /* Some GGTT VM helpers */ |
2524 | #define obj_to_ggtt(obj) \ |
2525 | (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) |
2526 | static inline bool i915_is_ggtt(struct i915_address_space *vm) |
2527 | { |
2528 | struct i915_address_space *ggtt = |
2529 | &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; |
2530 | return vm == ggtt; |
2531 | } |
2532 | |
2533 | static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) |
2534 | { |
2535 | return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); |
2536 | } |
2537 | |
2538 | static inline unsigned long |
2539 | i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) |
2540 | { |
2541 | return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); |
2542 | } |
2543 | |
2544 | static inline unsigned long |
2545 | i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) |
2546 | { |
2547 | return i915_gem_obj_size(obj, obj_to_ggtt(obj)); |
2548 | } |
2549 | |
2550 | static inline int __must_check |
2551 | i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, |
2552 | uint32_t alignment, |
2553 | unsigned flags) |
2554 | { |
2555 | return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); |
2556 | } |
2557 | |
2558 | static inline int |
2559 | i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) |
2560 | { |
2561 | return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); |
2562 | } |
2563 | |
2564 | void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); |
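
/*
 * Typical GGTT pin/unpin usage (a sketch; the alignment and flags are
 * illustrative only):
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	offset = i915_gem_obj_ggtt_offset(obj);
 *	...
 *	i915_gem_object_ggtt_unpin(obj);
 */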
2565 | |
2566 | /* i915_gem_context.c */ |
2567 | #define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base) |
2568 | int __must_check i915_gem_context_init(struct drm_device *dev); |
2569 | void i915_gem_context_fini(struct drm_device *dev); |
2570 | void i915_gem_context_reset(struct drm_device *dev); |
2571 | int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); |
2572 | int i915_gem_context_enable(struct drm_i915_private *dev_priv); |
2573 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); |
2574 | int i915_switch_context(struct intel_ring_buffer *ring, |
2575 | struct i915_hw_context *to); |
2576 | struct i915_hw_context * |
2577 | i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); |
2578 | void i915_gem_context_free(struct kref *ctx_ref); |
2579 | static inline void i915_gem_context_reference(struct i915_hw_context *ctx) |
2580 | { |
2581 | kref_get(&ctx->ref); |
2582 | } |
2583 | |
2584 | static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) |
2585 | { |
2586 | kref_put(&ctx->ref, i915_gem_context_free); |
2587 | } |
2588 | |
2589 | static inline bool i915_gem_context_is_default(const struct i915_hw_context *c) |
2590 | { |
2591 | return c->id == DEFAULT_CONTEXT_ID; |
2592 | } |
2593 | |
2594 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
2595 | struct drm_file *file); |
2596 | int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
2597 | struct drm_file *file); |
2598 | |
2599 | /* i915_gem_evict.c */ |
2600 | int __must_check i915_gem_evict_something(struct drm_device *dev, |
2601 | struct i915_address_space *vm, |
2602 | int min_size, |
2603 | unsigned alignment, |
2604 | unsigned cache_level, |
2605 | unsigned long start, |
2606 | unsigned long end, |
2607 | unsigned flags); |
2608 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); |
2609 | int i915_gem_evict_everything(struct drm_device *dev); |
2610 | |
2611 | /* i915_gem_gtt.c */ |
2612 | void i915_check_and_clear_faults(struct drm_device *dev); |
2613 | void i915_gem_suspend_gtt_mappings(struct drm_device *dev); |
2614 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
2615 | int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); |
2616 | void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); |
2617 | void i915_gem_init_global_gtt(struct drm_device *dev); |
2618 | void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, |
2619 | unsigned long mappable_end, unsigned long end); |
2620 | int i915_gem_gtt_init(struct drm_device *dev); |
2621 | static inline void i915_gem_chipset_flush(struct drm_device *dev) |
2622 | { |
2623 | if (INTEL_INFO(dev)->gen < 6) |
2624 | intel_gtt_chipset_flush(); |
2625 | } |
2626 | int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); |
2627 | bool intel_enable_ppgtt(struct drm_device *dev, bool full); |
2628 | |
2629 | /* i915_gem_stolen.c */ |
2630 | int i915_gem_init_stolen(struct drm_device *dev); |
2631 | int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); |
2632 | void i915_gem_stolen_cleanup_compression(struct drm_device *dev); |
2633 | void i915_gem_cleanup_stolen(struct drm_device *dev); |
2634 | struct drm_i915_gem_object * |
2635 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size); |
2636 | struct drm_i915_gem_object * |
2637 | i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, |
2638 | u32 stolen_offset, |
2639 | u32 gtt_offset, |
2640 | u32 size); |
2641 | void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); |
2642 | |
2643 | /* i915_gem_tiling.c */ |
2644 | static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
2645 | { |
2646 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2647 | |
2648 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
2649 | obj->tiling_mode != I915_TILING_NONE; |
2650 | } |
2651 | |
2652 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
2653 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
2654 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
2655 | |
2656 | /* i915_gem_debug.c */ |
2657 | #if WATCH_LISTS |
2658 | int i915_verify_lists(struct drm_device *dev); |
2659 | #else |
2660 | #define i915_verify_lists(dev) 0 |
2661 | #endif |
2662 | |
2663 | /* i915_debugfs.c */ |
2664 | int i915_debugfs_init(struct drm_minor *minor); |
2665 | void i915_debugfs_cleanup(struct drm_minor *minor); |
2666 | #ifdef CONFIG_DEBUG_FS |
2667 | void intel_display_crc_init(struct drm_device *dev); |
2668 | #else |
2669 | static inline void intel_display_crc_init(struct drm_device *dev) {} |
2670 | #endif |
2671 | |
2672 | /* i915_gpu_error.c */ |
2673 | __printf(2, 3) |
2674 | void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); |
2675 | int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, |
2676 | const struct i915_error_state_file_priv *error); |
2677 | int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, |
2678 | size_t count, loff_t pos); |
2679 | static inline void i915_error_state_buf_release( |
2680 | struct drm_i915_error_state_buf *eb) |
2681 | { |
2682 | kfree(eb->buf); |
2683 | } |
2684 | void i915_capture_error_state(struct drm_device *dev, bool wedge, |
2685 | const char *error_msg); |
2686 | void i915_error_state_get(struct drm_device *dev, |
2687 | struct i915_error_state_file_priv *error_priv); |
2688 | void i915_error_state_put(struct i915_error_state_file_priv *error_priv); |
2689 | void i915_destroy_error_state(struct drm_device *dev); |
2690 | |
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2692 | const char *i915_cache_level_str(int type); |
2693 | |
2694 | /* i915_cmd_parser.c */ |
2695 | void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring); |
2696 | bool i915_needs_cmd_parser(struct intel_ring_buffer *ring); |
2697 | int i915_parse_cmds(struct intel_ring_buffer *ring, |
2698 | struct drm_i915_gem_object *batch_obj, |
2699 | u32 batch_start_offset, |
2700 | bool is_master); |
2701 | |
2702 | /* i915_suspend.c */ |
2703 | extern int i915_save_state(struct drm_device *dev); |
2704 | extern int i915_restore_state(struct drm_device *dev); |
2705 | |
2706 | /* i915_ums.c */ |
2707 | void i915_save_display_reg(struct drm_device *dev); |
2708 | void i915_restore_display_reg(struct drm_device *dev); |
2709 | |
2710 | /* i915_sysfs.c */ |
2711 | void i915_setup_sysfs(struct drm_device *dev_priv); |
2712 | void i915_teardown_sysfs(struct drm_device *dev_priv); |
2713 | |
2714 | /* intel_i2c.c */ |
2715 | extern int intel_setup_gmbus(struct drm_device *dev); |
2716 | extern void intel_teardown_gmbus(struct drm_device *dev); |
2717 | static inline bool intel_gmbus_is_port_valid(unsigned port) |
2718 | { |
2719 | return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); |
2720 | } |
2721 | |
2722 | extern struct i2c_adapter *intel_gmbus_get_adapter( |
2723 | struct drm_i915_private *dev_priv, unsigned port); |
2724 | extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); |
2725 | extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); |
2726 | static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
2727 | { |
2728 | return container_of(adapter, struct intel_gmbus, adapter)->force_bit; |
2729 | } |
2730 | extern void intel_i2c_reset(struct drm_device *dev); |
2731 | |
2732 | /* intel_opregion.c */ |
2733 | struct intel_encoder; |
2734 | #ifdef CONFIG_ACPI |
2735 | extern int intel_opregion_setup(struct drm_device *dev); |
2736 | extern void intel_opregion_init(struct drm_device *dev); |
2737 | extern void intel_opregion_fini(struct drm_device *dev); |
2738 | extern void intel_opregion_asle_intr(struct drm_device *dev); |
2739 | extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, |
2740 | bool enable); |
2741 | extern int intel_opregion_notify_adapter(struct drm_device *dev, |
2742 | pci_power_t state); |
2743 | #else |
2744 | static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } |
2745 | static inline void intel_opregion_init(struct drm_device *dev) { return; } |
2746 | static inline void intel_opregion_fini(struct drm_device *dev) { return; } |
2747 | static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } |
2748 | static inline int |
2749 | intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) |
2750 | { |
2751 | return 0; |
2752 | } |
2753 | static inline int |
2754 | intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) |
2755 | { |
2756 | return 0; |
2757 | } |
2758 | #endif |
2759 | |
2760 | /* intel_acpi.c */ |
2761 | #ifdef CONFIG_ACPI |
2762 | #ifdef __NetBSD__ |
2763 | extern void intel_register_dsm_handler(struct drm_device *); |
2764 | #else |
2765 | extern void intel_register_dsm_handler(void); |
2766 | #endif |
2767 | extern void intel_unregister_dsm_handler(void); |
2768 | #else |
2769 | #ifdef __NetBSD__ |
2770 | static inline void |
2771 | intel_register_dsm_handler(struct drm_device *dev) |
2772 | { |
2773 | return; |
2774 | } |
2775 | #else |
2776 | static inline void intel_register_dsm_handler(void) { return; } |
2777 | #endif |
2778 | static inline void intel_unregister_dsm_handler(void) { return; } |
2779 | #endif /* CONFIG_ACPI */ |
2780 | |
2781 | /* modesetting */ |
2782 | extern void i915_disable_vga(struct drm_device *dev); |
2783 | extern void intel_modeset_init_hw(struct drm_device *dev); |
2784 | extern void intel_modeset_suspend_hw(struct drm_device *dev); |
2785 | extern void intel_modeset_init(struct drm_device *dev); |
2786 | extern void intel_modeset_gem_init(struct drm_device *dev); |
2787 | extern void intel_modeset_cleanup(struct drm_device *dev); |
2788 | extern void intel_connector_unregister(struct intel_connector *); |
2789 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
2790 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
2791 | bool force_restore); |
2792 | extern void i915_redisable_vga(struct drm_device *dev); |
2793 | extern void i915_redisable_vga_power_on(struct drm_device *dev); |
2794 | extern bool intel_fbc_enabled(struct drm_device *dev); |
2795 | extern void intel_disable_fbc(struct drm_device *dev); |
2796 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
2797 | extern void intel_init_pch_refclk(struct drm_device *dev); |
2798 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
2799 | extern void valleyview_set_rps(struct drm_device *dev, u8 val); |
2800 | extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); |
2801 | extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv); |
2802 | extern void intel_detect_pch(struct drm_device *dev); |
2803 | extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); |
2804 | extern int intel_enable_rc6(const struct drm_device *dev); |
2805 | |
2806 | extern bool i915_semaphore_is_enabled(struct drm_device *dev); |
2807 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, |
2808 | struct drm_file *file); |
2809 | int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, |
2810 | struct drm_file *file); |
2811 | |
2812 | /* overlay */ |
2813 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
2814 | extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, |
2815 | struct intel_overlay_error_state *error); |
2816 | |
2817 | extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); |
2818 | extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, |
2819 | struct drm_device *dev, |
2820 | struct intel_display_error_state *error); |
2821 | |
/* On SNB, the forcewake bit must be set before reading ring registers
 * to prevent the GT core from powering down and stale values being
 * returned.
2825 | */ |
2826 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); |
2827 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); |
2828 | void assert_force_wake_inactive(struct drm_i915_private *dev_priv); |
2829 | |
2830 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); |
2831 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); |
2832 | |
2833 | /* intel_sideband.c */ |
2834 | u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); |
2835 | void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); |
2836 | u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); |
2837 | u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); |
2838 | void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2839 | u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); |
2840 | void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2841 | u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); |
2842 | void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2843 | u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); |
2844 | void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2845 | u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); |
2846 | void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2847 | u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg); |
2848 | void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val); |
2849 | u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, |
2850 | enum intel_sbi_destination destination); |
2851 | void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
2852 | enum intel_sbi_destination destination); |
2853 | u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); |
2854 | void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2855 | |
2856 | int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val); |
2857 | int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); |
2858 | |
2859 | void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); |
2860 | void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); |
2861 | |
2862 | #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ |
2863 | (((reg) >= 0x2000 && (reg) < 0x4000) ||\ |
2864 | ((reg) >= 0x5000 && (reg) < 0x8000) ||\ |
2865 | ((reg) >= 0xB000 && (reg) < 0x12000) ||\ |
2866 | ((reg) >= 0x2E000 && (reg) < 0x30000)) |
2867 | |
2868 | #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\ |
2869 | (((reg) >= 0x12000 && (reg) < 0x14000) ||\ |
2870 | ((reg) >= 0x22000 && (reg) < 0x24000) ||\ |
2871 | ((reg) >= 0x30000 && (reg) < 0x40000)) |
2872 | |
2873 | #define FORCEWAKE_RENDER (1 << 0) |
2874 | #define FORCEWAKE_MEDIA (1 << 1) |
2875 | #define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA) |
2876 | |
2877 | |
2878 | #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) |
2879 | #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) |
2880 | |
2881 | #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) |
2882 | #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) |
2883 | #define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) |
2884 | #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) |
2885 | |
2886 | #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) |
2887 | #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) |
2888 | #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) |
2889 | #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) |
2890 | |
2891 | /* Be very careful with read/write 64-bit values. On 32-bit machines, they |
2892 | * will be implemented using 2 32-bit writes in an arbitrary order with |
2893 | * an arbitrary delay between them. This can cause the hardware to |
2894 | * act upon the intermediate value, possibly leading to corruption and |
2895 | * machine death. You have been warned. |
2896 | */ |
2897 | #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) |
2898 | #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) |
2899 | |
2900 | #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ |
2901 | u32 upper = I915_READ(upper_reg); \ |
2902 | u32 lower = I915_READ(lower_reg); \ |
2903 | u32 tmp = I915_READ(upper_reg); \ |
2904 | if (upper != tmp) { \ |
2905 | upper = tmp; \ |
2906 | lower = I915_READ(lower_reg); \ |
2907 | WARN_ON(I915_READ(upper_reg) != upper); \ |
2908 | } \ |
2909 | (u64)upper << 32 | lower; }) |
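
/*
 * I915_READ64_2x32 samples a 64-bit value exposed as two 32-bit
 * registers without taking a lock: the upper half is re-read after the
 * lower half and, if it changed in between (a carry from the lower
 * half), the lower half is read again so the pair stays consistent.
 * Usage sketch (the register names are hypothetical):
 *
 *	u64 count = I915_READ64_2x32(EXAMPLE_CNT_LOW, EXAMPLE_CNT_HIGH);
 */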
2910 | |
2911 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
2912 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
2913 | |
2914 | /* "Broadcast RGB" property */ |
2915 | #define INTEL_BROADCAST_RGB_AUTO 0 |
2916 | #define INTEL_BROADCAST_RGB_FULL 1 |
2917 | #define INTEL_BROADCAST_RGB_LIMITED 2 |
2918 | |
2919 | static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) |
2920 | { |
2921 | if (HAS_PCH_SPLIT(dev)) |
2922 | return CPU_VGACNTRL; |
2923 | else if (IS_VALLEYVIEW(dev)) |
2924 | return VLV_VGACNTRL; |
2925 | else |
2926 | return VGACNTRL; |
2927 | } |
2928 | |
2929 | static inline void __user *to_user_ptr(u64 address) |
2930 | { |
2931 | return (void __user *)(uintptr_t)address; |
2932 | } |
2933 | |
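/*
 * The two timeout helpers below add one jiffy to the converted value:
 * the conversion rounds and the first tick can fire almost immediately,
 * so without the extra jiffy a wait could end up shorter than
 * requested. The min_t() clamp keeps the result within the scheduler's
 * maximum timeout, MAX_JIFFY_OFFSET.
 */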
2934 | static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) |
2935 | { |
2936 | unsigned long j = msecs_to_jiffies(m); |
2937 | |
2938 | return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); |
2939 | } |
2940 | |
2941 | static inline unsigned long |
2942 | timespec_to_jiffies_timeout(const struct timespec *value) |
2943 | { |
2944 | unsigned long j = timespec_to_jiffies(value); |
2945 | |
2946 | return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); |
2947 | } |
2948 | |
2949 | /* |
2950 | * If you need to wait X milliseconds between events A and B, but event B |
2951 | * doesn't happen exactly after event A, you record the timestamp (jiffies) of |
2952 | * when event A happened, then just before event B you call this function and |
2953 | * pass the timestamp as the first argument, and X as the second argument. |
2954 | */ |
2955 | static inline void |
2956 | wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) |
2957 | { |
2958 | unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; |
2959 | |
2960 | /* |
2961 | * Don't re-read the value of "jiffies" every time since it may change |
2962 | * behind our back and break the math. |
2963 | */ |
2964 | tmp_jiffies = jiffies; |
2965 | target_jiffies = timestamp_jiffies + |
2966 | msecs_to_jiffies_timeout(to_wait_ms); |
2967 | |
2968 | if (time_after(target_jiffies, tmp_jiffies)) { |
2969 | remaining_jiffies = target_jiffies - tmp_jiffies; |
2970 | while (remaining_jiffies) |
2971 | remaining_jiffies = |
2972 | schedule_timeout_uninterruptible(remaining_jiffies); |
2973 | } |
2974 | } |
2975 | |
2976 | #endif |
2977 | |