1 | /* |
2 | * Copyright © 2013 Intel Corporation |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
22 | */ |
23 | |
24 | #include <linux/err.h> |
25 | #include "i915_drv.h" |
26 | #include "intel_drv.h" |
27 | |
28 | #define FORCEWAKE_ACK_TIMEOUT_MS 2 |
29 | |
30 | #ifdef __NetBSD__ |
31 | |
32 | #define __raw_i915_read8(dev_priv, reg) bus_space_read_1((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg)) |
33 | #define __raw_i915_write8(dev_priv, reg, val) bus_space_write_1((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg), (val)) |
34 | |
35 | #define __raw_i915_read16(dev_priv, reg) bus_space_read_2((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg)) |
36 | #define __raw_i915_write16(dev_priv, reg, val) bus_space_write_2((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg), (val)) |
37 | |
38 | #define __raw_i915_read32(dev_priv, reg) bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg)) |
39 | #define __raw_i915_write32(dev_priv, reg, val) bus_space_write_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg), (val)) |
40 | |
41 | #ifdef _LP64 |
42 | #define __raw_i915_read64(dev_priv, reg) bus_space_read_8((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg)) |
43 | #define __raw_i915_write64(dev_priv, reg, val) bus_space_write_8((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg), (val)) |
44 | #else |
45 | #define __raw_i915_read64(dev_priv, reg) (bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg)) | ((uint64_t)bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg) + 4) << 32)) |
46 | #define __raw_i915_write64(dev_priv, reg, val) (bus_space_write_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg), (uint32_t)(val)), bus_space_write_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg) + 4, (uint32_t)((val) >> 32))) |
47 | #endif |
48 | |
49 | #else |
50 | |
51 | #define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) |
52 | #define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) |
53 | |
54 | #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) |
55 | #define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__)) |
56 | |
57 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) |
58 | #define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__)) |
59 | |
60 | #define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__)) |
61 | #define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__)) |
62 | |
63 | #endif |
64 | |
65 | #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) |
66 | |
67 | static void |
68 | assert_device_not_suspended(struct drm_i915_private *dev_priv) |
69 | { |
70 | WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, |
71 | "Device suspended\n" ); |
72 | } |
73 | |
74 | static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) |
75 | { |
76 | u32 gt_thread_status_mask; |
77 | |
78 | if (IS_HASWELL(dev_priv->dev)) |
79 | gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; |
80 | else |
81 | gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; |
82 | |
83 | /* w/a for a sporadic read returning 0 by waiting for the GT |
84 | * thread to wake up. |
85 | */ |
86 | if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) |
87 | DRM_ERROR("GT thread status wait timed out\n" ); |
88 | } |
89 | |
90 | static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) |
91 | { |
92 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); |
93 | /* something from same cacheline, but !FORCEWAKE */ |
94 | __raw_posting_read(dev_priv, ECOBUS); |
95 | } |
96 | |
97 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, |
98 | int fw_engine) |
99 | { |
100 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, |
101 | FORCEWAKE_ACK_TIMEOUT_MS)) |
102 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n" ); |
103 | |
104 | __raw_i915_write32(dev_priv, FORCEWAKE, 1); |
105 | /* something from same cacheline, but !FORCEWAKE */ |
106 | __raw_posting_read(dev_priv, ECOBUS); |
107 | |
108 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1), |
109 | FORCEWAKE_ACK_TIMEOUT_MS)) |
110 | DRM_ERROR("Timed out waiting for forcewake to ack request.\n" ); |
111 | |
112 | /* WaRsForcewakeWaitTC0:snb */ |
113 | __gen6_gt_wait_for_thread_c0(dev_priv); |
114 | } |
115 | |
116 | static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) |
117 | { |
118 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); |
119 | /* something from same cacheline, but !FORCEWAKE_MT */ |
120 | __raw_posting_read(dev_priv, ECOBUS); |
121 | } |
122 | |
123 | static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, |
124 | int fw_engine) |
125 | { |
126 | u32 forcewake_ack; |
127 | |
128 | if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev)) |
129 | forcewake_ack = FORCEWAKE_ACK_HSW; |
130 | else |
131 | forcewake_ack = FORCEWAKE_MT_ACK; |
132 | |
133 | if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0, |
134 | FORCEWAKE_ACK_TIMEOUT_MS)) |
135 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n" ); |
136 | |
137 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, |
138 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
139 | /* something from same cacheline, but !FORCEWAKE_MT */ |
140 | __raw_posting_read(dev_priv, ECOBUS); |
141 | |
142 | if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL), |
143 | FORCEWAKE_ACK_TIMEOUT_MS)) |
144 | DRM_ERROR("Timed out waiting for forcewake to ack request.\n" ); |
145 | |
146 | /* WaRsForcewakeWaitTC0:ivb,hsw */ |
147 | if (INTEL_INFO(dev_priv->dev)->gen < 8) |
148 | __gen6_gt_wait_for_thread_c0(dev_priv); |
149 | } |
150 | |
151 | static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) |
152 | { |
153 | u32 gtfifodbg; |
154 | |
155 | gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); |
156 | if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n" , gtfifodbg)) |
157 | __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg); |
158 | } |
159 | |
160 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, |
161 | int fw_engine) |
162 | { |
163 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); |
164 | /* something from same cacheline, but !FORCEWAKE */ |
165 | __raw_posting_read(dev_priv, ECOBUS); |
166 | gen6_gt_check_fifodbg(dev_priv); |
167 | } |
168 | |
169 | static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv, |
170 | int fw_engine) |
171 | { |
172 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, |
173 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
174 | /* something from same cacheline, but !FORCEWAKE_MT */ |
175 | __raw_posting_read(dev_priv, ECOBUS); |
176 | |
177 | if (IS_GEN7(dev_priv->dev)) |
178 | gen6_gt_check_fifodbg(dev_priv); |
179 | } |
180 | |
181 | static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) |
182 | { |
183 | int ret = 0; |
184 | |
185 | /* On VLV, FIFO will be shared by both SW and HW. |
186 | * So, we need to read the FREE_ENTRIES everytime */ |
187 | if (IS_VALLEYVIEW(dev_priv->dev)) |
188 | dev_priv->uncore.fifo_count = |
189 | __raw_i915_read32(dev_priv, GTFIFOCTL) & |
190 | GT_FIFO_FREE_ENTRIES_MASK; |
191 | |
192 | if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { |
193 | int loop = 500; |
194 | u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; |
195 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { |
196 | udelay(10); |
197 | fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; |
198 | } |
199 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) |
200 | ++ret; |
201 | dev_priv->uncore.fifo_count = fifo; |
202 | } |
203 | dev_priv->uncore.fifo_count--; |
204 | |
205 | return ret; |
206 | } |
207 | |
208 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) |
209 | { |
210 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
211 | _MASKED_BIT_DISABLE(0xffff)); |
212 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, |
213 | _MASKED_BIT_DISABLE(0xffff)); |
214 | /* something from same cacheline, but !FORCEWAKE_VLV */ |
215 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); |
216 | } |
217 | |
218 | static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, |
219 | int fw_engine) |
220 | { |
221 | /* Check for Render Engine */ |
222 | if (FORCEWAKE_RENDER & fw_engine) { |
223 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
224 | FORCEWAKE_ACK_VLV) & |
225 | FORCEWAKE_KERNEL) == 0, |
226 | FORCEWAKE_ACK_TIMEOUT_MS)) |
227 | DRM_ERROR("Timed out: Render forcewake old ack to clear.\n" ); |
228 | |
229 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
230 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
231 | |
232 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
233 | FORCEWAKE_ACK_VLV) & |
234 | FORCEWAKE_KERNEL), |
235 | FORCEWAKE_ACK_TIMEOUT_MS)) |
236 | DRM_ERROR("Timed out: waiting for Render to ack.\n" ); |
237 | } |
238 | |
239 | /* Check for Media Engine */ |
240 | if (FORCEWAKE_MEDIA & fw_engine) { |
241 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
242 | FORCEWAKE_ACK_MEDIA_VLV) & |
243 | FORCEWAKE_KERNEL) == 0, |
244 | FORCEWAKE_ACK_TIMEOUT_MS)) |
245 | DRM_ERROR("Timed out: Media forcewake old ack to clear.\n" ); |
246 | |
247 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, |
248 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
249 | |
250 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
251 | FORCEWAKE_ACK_MEDIA_VLV) & |
252 | FORCEWAKE_KERNEL), |
253 | FORCEWAKE_ACK_TIMEOUT_MS)) |
254 | DRM_ERROR("Timed out: waiting for media to ack.\n" ); |
255 | } |
256 | |
257 | /* WaRsForcewakeWaitTC0:vlv */ |
258 | __gen6_gt_wait_for_thread_c0(dev_priv); |
259 | |
260 | } |
261 | |
262 | static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, |
263 | int fw_engine) |
264 | { |
265 | |
266 | /* Check for Render Engine */ |
267 | if (FORCEWAKE_RENDER & fw_engine) |
268 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
269 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
270 | |
271 | |
272 | /* Check for Media Engine */ |
273 | if (FORCEWAKE_MEDIA & fw_engine) |
274 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, |
275 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
276 | |
277 | /* The below doubles as a POSTING_READ */ |
278 | gen6_gt_check_fifodbg(dev_priv); |
279 | |
280 | } |
281 | |
282 | void vlv_force_wake_get(struct drm_i915_private *dev_priv, |
283 | int fw_engine) |
284 | { |
285 | unsigned long irqflags; |
286 | |
287 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
288 | |
289 | if (fw_engine & FORCEWAKE_RENDER && |
290 | dev_priv->uncore.fw_rendercount++ != 0) |
291 | fw_engine &= ~FORCEWAKE_RENDER; |
292 | if (fw_engine & FORCEWAKE_MEDIA && |
293 | dev_priv->uncore.fw_mediacount++ != 0) |
294 | fw_engine &= ~FORCEWAKE_MEDIA; |
295 | |
296 | if (fw_engine) |
297 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine); |
298 | |
299 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
300 | } |
301 | |
302 | void vlv_force_wake_put(struct drm_i915_private *dev_priv, |
303 | int fw_engine) |
304 | { |
305 | unsigned long irqflags; |
306 | |
307 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
308 | |
309 | if (fw_engine & FORCEWAKE_RENDER) { |
310 | WARN_ON(!dev_priv->uncore.fw_rendercount); |
311 | if (--dev_priv->uncore.fw_rendercount != 0) |
312 | fw_engine &= ~FORCEWAKE_RENDER; |
313 | } |
314 | |
315 | if (fw_engine & FORCEWAKE_MEDIA) { |
316 | WARN_ON(!dev_priv->uncore.fw_mediacount); |
317 | if (--dev_priv->uncore.fw_mediacount != 0) |
318 | fw_engine &= ~FORCEWAKE_MEDIA; |
319 | } |
320 | |
321 | if (fw_engine) |
322 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine); |
323 | |
324 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
325 | } |
326 | |
327 | static void gen6_force_wake_timer(unsigned long arg) |
328 | { |
329 | struct drm_i915_private *dev_priv = (void *)arg; |
330 | unsigned long irqflags; |
331 | |
332 | assert_device_not_suspended(dev_priv); |
333 | |
334 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
335 | WARN_ON(!dev_priv->uncore.forcewake_count); |
336 | |
337 | if (--dev_priv->uncore.forcewake_count == 0) |
338 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); |
339 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
340 | |
341 | intel_runtime_pm_put(dev_priv); |
342 | } |
343 | |
344 | static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) |
345 | { |
346 | struct drm_i915_private *dev_priv = dev->dev_private; |
347 | unsigned long irqflags; |
348 | |
349 | del_timer_sync(&dev_priv->uncore.force_wake_timer); |
350 | |
351 | /* Hold uncore.lock across reset to prevent any register access |
352 | * with forcewake not set correctly |
353 | */ |
354 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
355 | |
356 | if (IS_VALLEYVIEW(dev)) |
357 | vlv_force_wake_reset(dev_priv); |
358 | else if (IS_GEN6(dev) || IS_GEN7(dev)) |
359 | __gen6_gt_force_wake_reset(dev_priv); |
360 | |
361 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) |
362 | __gen7_gt_force_wake_mt_reset(dev_priv); |
363 | |
364 | if (restore) { /* If reset with a user forcewake, try to restore */ |
365 | unsigned fw = 0; |
366 | |
367 | if (IS_VALLEYVIEW(dev)) { |
368 | if (dev_priv->uncore.fw_rendercount) |
369 | fw |= FORCEWAKE_RENDER; |
370 | |
371 | if (dev_priv->uncore.fw_mediacount) |
372 | fw |= FORCEWAKE_MEDIA; |
373 | } else { |
374 | if (dev_priv->uncore.forcewake_count) |
375 | fw = FORCEWAKE_ALL; |
376 | } |
377 | |
378 | if (fw) |
379 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); |
380 | |
381 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
382 | dev_priv->uncore.fifo_count = |
383 | __raw_i915_read32(dev_priv, GTFIFOCTL) & |
384 | GT_FIFO_FREE_ENTRIES_MASK; |
385 | } else { |
386 | dev_priv->uncore.forcewake_count = 0; |
387 | dev_priv->uncore.fw_rendercount = 0; |
388 | dev_priv->uncore.fw_mediacount = 0; |
389 | } |
390 | |
391 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
392 | } |
393 | |
394 | void intel_uncore_early_sanitize(struct drm_device *dev) |
395 | { |
396 | struct drm_i915_private *dev_priv = dev->dev_private; |
397 | |
398 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) |
399 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
400 | |
401 | if (IS_HASWELL(dev) && |
402 | (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) { |
403 | /* The docs do not explain exactly how the calculation can be |
404 | * made. It is somewhat guessable, but for now, it's always |
405 | * 128MB. |
406 | * NB: We can't write IDICR yet because we do not have gt funcs |
407 | * set up */ |
408 | dev_priv->ellc_size = 128; |
409 | DRM_INFO("Found %zuMB of eLLC\n" , dev_priv->ellc_size); |
410 | } |
411 | |
412 | /* clear out old GT FIFO errors */ |
413 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
414 | __raw_i915_write32(dev_priv, GTFIFODBG, |
415 | __raw_i915_read32(dev_priv, GTFIFODBG)); |
416 | |
417 | intel_uncore_forcewake_reset(dev, false); |
418 | } |
419 | |
420 | void intel_uncore_sanitize(struct drm_device *dev) |
421 | { |
422 | struct drm_i915_private *dev_priv = dev->dev_private; |
423 | u32 reg_val; |
424 | |
425 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ |
426 | intel_disable_gt_powersave(dev); |
427 | |
428 | /* Turn off power gate, require especially for the BIOS less system */ |
429 | if (IS_VALLEYVIEW(dev)) { |
430 | |
431 | mutex_lock(&dev_priv->rps.hw_lock); |
432 | reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS); |
433 | |
434 | if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) | |
435 | PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) | |
436 | PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D))) |
437 | vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0); |
438 | |
439 | mutex_unlock(&dev_priv->rps.hw_lock); |
440 | |
441 | } |
442 | } |
443 | |
444 | /* |
445 | * Generally this is called implicitly by the register read function. However, |
446 | * if some sequence requires the GT to not power down then this function should |
447 | * be called at the beginning of the sequence followed by a call to |
448 | * gen6_gt_force_wake_put() at the end of the sequence. |
449 | */ |
450 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) |
451 | { |
452 | unsigned long irqflags; |
453 | |
454 | if (!dev_priv->uncore.funcs.force_wake_get) |
455 | return; |
456 | |
457 | intel_runtime_pm_get(dev_priv); |
458 | |
459 | /* Redirect to VLV specific routine */ |
460 | if (IS_VALLEYVIEW(dev_priv->dev)) |
461 | return vlv_force_wake_get(dev_priv, fw_engine); |
462 | |
463 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
464 | if (dev_priv->uncore.forcewake_count++ == 0) |
465 | dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); |
466 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
467 | } |
468 | |
469 | /* |
470 | * see gen6_gt_force_wake_get() |
471 | */ |
472 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) |
473 | { |
474 | unsigned long irqflags; |
475 | bool delayed = false; |
476 | |
477 | if (!dev_priv->uncore.funcs.force_wake_put) |
478 | return; |
479 | |
480 | /* Redirect to VLV specific routine */ |
481 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
482 | vlv_force_wake_put(dev_priv, fw_engine); |
483 | goto out; |
484 | } |
485 | |
486 | |
487 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
488 | WARN_ON(!dev_priv->uncore.forcewake_count); |
489 | |
490 | if (--dev_priv->uncore.forcewake_count == 0) { |
491 | dev_priv->uncore.forcewake_count++; |
492 | delayed = true; |
493 | mod_timer_pinned(&dev_priv->uncore.force_wake_timer, |
494 | jiffies + 1); |
495 | } |
496 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
497 | |
498 | out: |
499 | if (!delayed) |
500 | intel_runtime_pm_put(dev_priv); |
501 | } |
502 | |
503 | void assert_force_wake_inactive(struct drm_i915_private *dev_priv) |
504 | { |
505 | if (!dev_priv->uncore.funcs.force_wake_get) |
506 | return; |
507 | |
508 | WARN_ON(dev_priv->uncore.forcewake_count > 0); |
509 | } |
510 | |
511 | /* We give fast paths for the really cool registers */ |
512 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
513 | ((reg) < 0x40000 && (reg) != FORCEWAKE) |
514 | |
515 | static void |
516 | ilk_dummy_write(struct drm_i915_private *dev_priv) |
517 | { |
518 | /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up |
519 | * the chip from rc6 before touching it for real. MI_MODE is masked, |
520 | * hence harmless to write 0 into. */ |
521 | __raw_i915_write32(dev_priv, MI_MODE, 0); |
522 | } |
523 | |
524 | static void |
525 | hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) |
526 | { |
527 | if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { |
528 | DRM_ERROR("Unknown unclaimed register before writing to %x\n" , |
529 | reg); |
530 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
531 | } |
532 | } |
533 | |
534 | static void |
535 | hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) |
536 | { |
537 | if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { |
538 | DRM_ERROR("Unclaimed write to %x\n" , reg); |
539 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
540 | } |
541 | } |
542 | |
543 | #define (x) \ |
544 | unsigned long irqflags; \ |
545 | u##x val = 0; \ |
546 | assert_device_not_suspended(dev_priv); \ |
547 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
548 | |
549 | #define \ |
550 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ |
551 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
552 | return val |
553 | |
554 | #define __gen4_read(x) \ |
555 | static u##x \ |
556 | gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ |
557 | REG_READ_HEADER(x); \ |
558 | val = __raw_i915_read##x(dev_priv, reg); \ |
559 | REG_READ_FOOTER; \ |
560 | } |
561 | |
562 | #define __gen5_read(x) \ |
563 | static u##x \ |
564 | gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ |
565 | REG_READ_HEADER(x); \ |
566 | ilk_dummy_write(dev_priv); \ |
567 | val = __raw_i915_read##x(dev_priv, reg); \ |
568 | REG_READ_FOOTER; \ |
569 | } |
570 | |
571 | #define __gen6_read(x) \ |
572 | static u##x \ |
573 | gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ |
574 | REG_READ_HEADER(x); \ |
575 | if (dev_priv->uncore.forcewake_count == 0 && \ |
576 | NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
577 | dev_priv->uncore.funcs.force_wake_get(dev_priv, \ |
578 | FORCEWAKE_ALL); \ |
579 | val = __raw_i915_read##x(dev_priv, reg); \ |
580 | dev_priv->uncore.funcs.force_wake_put(dev_priv, \ |
581 | FORCEWAKE_ALL); \ |
582 | } else { \ |
583 | val = __raw_i915_read##x(dev_priv, reg); \ |
584 | } \ |
585 | REG_READ_FOOTER; \ |
586 | } |
587 | |
588 | #define __vlv_read(x) \ |
589 | static u##x \ |
590 | vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ |
591 | unsigned fwengine = 0; \ |
592 | REG_READ_HEADER(x); \ |
593 | if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \ |
594 | if (dev_priv->uncore.fw_rendercount == 0) \ |
595 | fwengine = FORCEWAKE_RENDER; \ |
596 | } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \ |
597 | if (dev_priv->uncore.fw_mediacount == 0) \ |
598 | fwengine = FORCEWAKE_MEDIA; \ |
599 | } \ |
600 | if (fwengine) \ |
601 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ |
602 | val = __raw_i915_read##x(dev_priv, reg); \ |
603 | if (fwengine) \ |
604 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ |
605 | REG_READ_FOOTER; \ |
606 | } |
607 | |
608 | |
609 | __vlv_read(8) |
610 | __vlv_read(16) |
611 | __vlv_read(32) |
612 | __vlv_read(64) |
613 | __gen6_read(8) |
614 | __gen6_read(16) |
615 | __gen6_read(32) |
616 | __gen6_read(64) |
617 | __gen5_read(8) |
618 | __gen5_read(16) |
619 | __gen5_read(32) |
620 | __gen5_read(64) |
621 | __gen4_read(8) |
622 | __gen4_read(16) |
623 | __gen4_read(32) |
624 | __gen4_read(64) |
625 | |
626 | #undef __vlv_read |
627 | #undef __gen6_read |
628 | #undef __gen5_read |
629 | #undef __gen4_read |
630 | #undef REG_READ_FOOTER |
631 | #undef REG_READ_HEADER |
632 | |
633 | #define \ |
634 | unsigned long irqflags; \ |
635 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
636 | assert_device_not_suspended(dev_priv); \ |
637 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
638 | |
639 | #define \ |
640 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) |
641 | |
642 | #define __gen4_write(x) \ |
643 | static void \ |
644 | gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
645 | REG_WRITE_HEADER; \ |
646 | __raw_i915_write##x(dev_priv, reg, val); \ |
647 | REG_WRITE_FOOTER; \ |
648 | } |
649 | |
650 | #define __gen5_write(x) \ |
651 | static void \ |
652 | gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
653 | REG_WRITE_HEADER; \ |
654 | ilk_dummy_write(dev_priv); \ |
655 | __raw_i915_write##x(dev_priv, reg, val); \ |
656 | REG_WRITE_FOOTER; \ |
657 | } |
658 | |
659 | #define __gen6_write(x) \ |
660 | static void \ |
661 | gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
662 | u32 __fifo_ret = 0; \ |
663 | REG_WRITE_HEADER; \ |
664 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
665 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
666 | } \ |
667 | __raw_i915_write##x(dev_priv, reg, val); \ |
668 | if (unlikely(__fifo_ret)) { \ |
669 | gen6_gt_check_fifodbg(dev_priv); \ |
670 | } \ |
671 | REG_WRITE_FOOTER; \ |
672 | } |
673 | |
674 | #define __hsw_write(x) \ |
675 | static void \ |
676 | hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
677 | u32 __fifo_ret = 0; \ |
678 | REG_WRITE_HEADER; \ |
679 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
680 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
681 | } \ |
682 | hsw_unclaimed_reg_clear(dev_priv, reg); \ |
683 | __raw_i915_write##x(dev_priv, reg, val); \ |
684 | if (unlikely(__fifo_ret)) { \ |
685 | gen6_gt_check_fifodbg(dev_priv); \ |
686 | } \ |
687 | hsw_unclaimed_reg_check(dev_priv, reg); \ |
688 | REG_WRITE_FOOTER; \ |
689 | } |
690 | |
691 | static const u32 gen8_shadowed_regs[] = { |
692 | FORCEWAKE_MT, |
693 | GEN6_RPNSWREQ, |
694 | GEN6_RC_VIDEO_FREQ, |
695 | RING_TAIL(RENDER_RING_BASE), |
696 | RING_TAIL(GEN6_BSD_RING_BASE), |
697 | RING_TAIL(VEBOX_RING_BASE), |
698 | RING_TAIL(BLT_RING_BASE), |
699 | /* TODO: Other registers are not yet used */ |
700 | }; |
701 | |
702 | static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) |
703 | { |
704 | int i; |
705 | for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) |
706 | if (reg == gen8_shadowed_regs[i]) |
707 | return true; |
708 | |
709 | return false; |
710 | } |
711 | |
712 | #define __gen8_write(x) \ |
713 | static void \ |
714 | gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
715 | REG_WRITE_HEADER; \ |
716 | if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \ |
717 | if (dev_priv->uncore.forcewake_count == 0) \ |
718 | dev_priv->uncore.funcs.force_wake_get(dev_priv, \ |
719 | FORCEWAKE_ALL); \ |
720 | __raw_i915_write##x(dev_priv, reg, val); \ |
721 | if (dev_priv->uncore.forcewake_count == 0) \ |
722 | dev_priv->uncore.funcs.force_wake_put(dev_priv, \ |
723 | FORCEWAKE_ALL); \ |
724 | } else { \ |
725 | __raw_i915_write##x(dev_priv, reg, val); \ |
726 | } \ |
727 | REG_WRITE_FOOTER; \ |
728 | } |
729 | |
730 | __gen8_write(8) |
731 | __gen8_write(16) |
732 | __gen8_write(32) |
733 | __gen8_write(64) |
734 | __hsw_write(8) |
735 | __hsw_write(16) |
736 | __hsw_write(32) |
737 | __hsw_write(64) |
738 | __gen6_write(8) |
739 | __gen6_write(16) |
740 | __gen6_write(32) |
741 | __gen6_write(64) |
742 | __gen5_write(8) |
743 | __gen5_write(16) |
744 | __gen5_write(32) |
745 | __gen5_write(64) |
746 | __gen4_write(8) |
747 | __gen4_write(16) |
748 | __gen4_write(32) |
749 | __gen4_write(64) |
750 | |
751 | #undef __gen8_write |
752 | #undef __hsw_write |
753 | #undef __gen6_write |
754 | #undef __gen5_write |
755 | #undef __gen4_write |
756 | #undef REG_WRITE_FOOTER |
757 | #undef REG_WRITE_HEADER |
758 | |
759 | void intel_uncore_init(struct drm_device *dev) |
760 | { |
761 | struct drm_i915_private *dev_priv = dev->dev_private; |
762 | |
763 | setup_timer(&dev_priv->uncore.force_wake_timer, |
764 | gen6_force_wake_timer, (unsigned long)dev_priv); |
765 | |
766 | intel_uncore_early_sanitize(dev); |
767 | |
768 | if (IS_VALLEYVIEW(dev)) { |
769 | dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; |
770 | dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; |
771 | } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { |
772 | dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; |
773 | dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; |
774 | } else if (IS_IVYBRIDGE(dev)) { |
775 | u32 ecobus; |
776 | |
777 | /* IVB configs may use multi-threaded forcewake */ |
778 | |
779 | /* A small trick here - if the bios hasn't configured |
780 | * MT forcewake, and if the device is in RC6, then |
781 | * force_wake_mt_get will not wake the device and the |
782 | * ECOBUS read will return zero. Which will be |
783 | * (correctly) interpreted by the test below as MT |
784 | * forcewake being disabled. |
785 | */ |
786 | mutex_lock(&dev->struct_mutex); |
787 | __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL); |
788 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); |
789 | __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL); |
790 | mutex_unlock(&dev->struct_mutex); |
791 | |
792 | if (ecobus & FORCEWAKE_MT_ENABLE) { |
793 | dev_priv->uncore.funcs.force_wake_get = |
794 | __gen7_gt_force_wake_mt_get; |
795 | dev_priv->uncore.funcs.force_wake_put = |
796 | __gen7_gt_force_wake_mt_put; |
797 | } else { |
798 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n" ); |
799 | DRM_INFO("when using vblank-synced partial screen updates.\n" ); |
800 | dev_priv->uncore.funcs.force_wake_get = |
801 | __gen6_gt_force_wake_get; |
802 | dev_priv->uncore.funcs.force_wake_put = |
803 | __gen6_gt_force_wake_put; |
804 | } |
805 | } else if (IS_GEN6(dev)) { |
806 | dev_priv->uncore.funcs.force_wake_get = |
807 | __gen6_gt_force_wake_get; |
808 | dev_priv->uncore.funcs.force_wake_put = |
809 | __gen6_gt_force_wake_put; |
810 | } |
811 | |
812 | switch (INTEL_INFO(dev)->gen) { |
813 | default: |
814 | dev_priv->uncore.funcs.mmio_writeb = gen8_write8; |
815 | dev_priv->uncore.funcs.mmio_writew = gen8_write16; |
816 | dev_priv->uncore.funcs.mmio_writel = gen8_write32; |
817 | dev_priv->uncore.funcs.mmio_writeq = gen8_write64; |
818 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; |
819 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; |
820 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; |
821 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; |
822 | break; |
823 | case 7: |
824 | case 6: |
825 | if (IS_HASWELL(dev)) { |
826 | dev_priv->uncore.funcs.mmio_writeb = hsw_write8; |
827 | dev_priv->uncore.funcs.mmio_writew = hsw_write16; |
828 | dev_priv->uncore.funcs.mmio_writel = hsw_write32; |
829 | dev_priv->uncore.funcs.mmio_writeq = hsw_write64; |
830 | } else { |
831 | dev_priv->uncore.funcs.mmio_writeb = gen6_write8; |
832 | dev_priv->uncore.funcs.mmio_writew = gen6_write16; |
833 | dev_priv->uncore.funcs.mmio_writel = gen6_write32; |
834 | dev_priv->uncore.funcs.mmio_writeq = gen6_write64; |
835 | } |
836 | |
837 | if (IS_VALLEYVIEW(dev)) { |
838 | dev_priv->uncore.funcs.mmio_readb = vlv_read8; |
839 | dev_priv->uncore.funcs.mmio_readw = vlv_read16; |
840 | dev_priv->uncore.funcs.mmio_readl = vlv_read32; |
841 | dev_priv->uncore.funcs.mmio_readq = vlv_read64; |
842 | } else { |
843 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; |
844 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; |
845 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; |
846 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; |
847 | } |
848 | break; |
849 | case 5: |
850 | dev_priv->uncore.funcs.mmio_writeb = gen5_write8; |
851 | dev_priv->uncore.funcs.mmio_writew = gen5_write16; |
852 | dev_priv->uncore.funcs.mmio_writel = gen5_write32; |
853 | dev_priv->uncore.funcs.mmio_writeq = gen5_write64; |
854 | dev_priv->uncore.funcs.mmio_readb = gen5_read8; |
855 | dev_priv->uncore.funcs.mmio_readw = gen5_read16; |
856 | dev_priv->uncore.funcs.mmio_readl = gen5_read32; |
857 | dev_priv->uncore.funcs.mmio_readq = gen5_read64; |
858 | break; |
859 | case 4: |
860 | case 3: |
861 | case 2: |
862 | dev_priv->uncore.funcs.mmio_writeb = gen4_write8; |
863 | dev_priv->uncore.funcs.mmio_writew = gen4_write16; |
864 | dev_priv->uncore.funcs.mmio_writel = gen4_write32; |
865 | dev_priv->uncore.funcs.mmio_writeq = gen4_write64; |
866 | dev_priv->uncore.funcs.mmio_readb = gen4_read8; |
867 | dev_priv->uncore.funcs.mmio_readw = gen4_read16; |
868 | dev_priv->uncore.funcs.mmio_readl = gen4_read32; |
869 | dev_priv->uncore.funcs.mmio_readq = gen4_read64; |
870 | break; |
871 | } |
872 | } |
873 | |
874 | void intel_uncore_fini(struct drm_device *dev) |
875 | { |
876 | /* Paranoia: make sure we have disabled everything before we exit. */ |
877 | intel_uncore_sanitize(dev); |
878 | intel_uncore_forcewake_reset(dev, false); |
879 | } |
880 | |
881 | void intel_uncore_destroy(struct drm_device *dev) |
882 | { |
883 | #ifdef __NetBSD__ |
884 | struct drm_i915_private *const dev_priv = dev->dev_private; |
885 | |
886 | teardown_timer(&dev_priv->uncore.force_wake_timer); |
887 | #endif |
888 | } |
889 | |
890 | static const struct register_whitelist { |
891 | uint64_t offset; |
892 | uint32_t size; |
893 | uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ |
894 | } whitelist[] = { |
895 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 }, |
896 | }; |
897 | |
898 | int i915_reg_read_ioctl(struct drm_device *dev, |
899 | void *data, struct drm_file *file) |
900 | { |
901 | struct drm_i915_private *dev_priv = dev->dev_private; |
902 | struct drm_i915_reg_read *reg = data; |
903 | struct register_whitelist const *entry = whitelist; |
904 | int i, ret = 0; |
905 | |
906 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { |
907 | if (entry->offset == reg->offset && |
908 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) |
909 | break; |
910 | } |
911 | |
912 | if (i == ARRAY_SIZE(whitelist)) |
913 | return -EINVAL; |
914 | |
915 | intel_runtime_pm_get(dev_priv); |
916 | |
917 | switch (entry->size) { |
918 | case 8: |
919 | reg->val = I915_READ64(reg->offset); |
920 | break; |
921 | case 4: |
922 | reg->val = I915_READ(reg->offset); |
923 | break; |
924 | case 2: |
925 | reg->val = I915_READ16(reg->offset); |
926 | break; |
927 | case 1: |
928 | reg->val = I915_READ8(reg->offset); |
929 | break; |
930 | default: |
931 | WARN_ON(1); |
932 | ret = -EINVAL; |
933 | goto out; |
934 | } |
935 | |
936 | out: |
937 | intel_runtime_pm_put(dev_priv); |
938 | return ret; |
939 | } |
940 | |
941 | int i915_get_reset_stats_ioctl(struct drm_device *dev, |
942 | void *data, struct drm_file *file) |
943 | { |
944 | struct drm_i915_private *dev_priv = dev->dev_private; |
945 | struct drm_i915_reset_stats *args = data; |
946 | struct i915_ctx_hang_stats *hs; |
947 | struct i915_hw_context *ctx; |
948 | int ret; |
949 | |
950 | if (args->flags || args->pad) |
951 | return -EINVAL; |
952 | |
953 | #ifdef __NetBSD__ |
954 | if (args->ctx_id == DEFAULT_CONTEXT_ID && !DRM_SUSER()) |
955 | return -EPERM; |
956 | #else |
957 | if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) |
958 | return -EPERM; |
959 | #endif |
960 | |
961 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
962 | if (ret) |
963 | return ret; |
964 | |
965 | ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); |
966 | if (IS_ERR(ctx)) { |
967 | mutex_unlock(&dev->struct_mutex); |
968 | return PTR_ERR(ctx); |
969 | } |
970 | hs = &ctx->hang_stats; |
971 | |
972 | #ifdef __NetBSD__ |
973 | if (DRM_SUSER()) |
974 | #else |
975 | if (capable(CAP_SYS_ADMIN)) |
976 | #endif |
977 | args->reset_count = i915_reset_count(&dev_priv->gpu_error); |
978 | else |
979 | args->reset_count = 0; |
980 | |
981 | args->batch_active = hs->batch_active; |
982 | args->batch_pending = hs->batch_pending; |
983 | |
984 | mutex_unlock(&dev->struct_mutex); |
985 | |
986 | return 0; |
987 | } |
988 | |
989 | static int i965_reset_complete(struct drm_device *dev) |
990 | { |
991 | u8 gdrst; |
992 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); |
993 | return (gdrst & GRDOM_RESET_ENABLE) == 0; |
994 | } |
995 | |
996 | static int i965_do_reset(struct drm_device *dev) |
997 | { |
998 | int ret; |
999 | |
1000 | /* |
1001 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as |
1002 | * well as the reset bit (GR/bit 0). Setting the GR bit |
1003 | * triggers the reset; when done, the hardware will clear it. |
1004 | */ |
1005 | pci_write_config_byte(dev->pdev, I965_GDRST, |
1006 | GRDOM_RENDER | GRDOM_RESET_ENABLE); |
1007 | ret = wait_for(i965_reset_complete(dev), 500); |
1008 | if (ret) |
1009 | return ret; |
1010 | |
1011 | /* We can't reset render&media without also resetting display ... */ |
1012 | pci_write_config_byte(dev->pdev, I965_GDRST, |
1013 | GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
1014 | |
1015 | ret = wait_for(i965_reset_complete(dev), 500); |
1016 | if (ret) |
1017 | return ret; |
1018 | |
1019 | pci_write_config_byte(dev->pdev, I965_GDRST, 0); |
1020 | |
1021 | return 0; |
1022 | } |
1023 | |
1024 | static int ironlake_do_reset(struct drm_device *dev) |
1025 | { |
1026 | struct drm_i915_private *dev_priv = dev->dev_private; |
1027 | u32 gdrst; |
1028 | int ret; |
1029 | |
1030 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); |
1031 | gdrst &= ~GRDOM_MASK; |
1032 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, |
1033 | gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); |
1034 | ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); |
1035 | if (ret) |
1036 | return ret; |
1037 | |
1038 | /* We can't reset render&media without also resetting display ... */ |
1039 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); |
1040 | gdrst &= ~GRDOM_MASK; |
1041 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, |
1042 | gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
1043 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); |
1044 | } |
1045 | |
1046 | static int gen6_do_reset(struct drm_device *dev) |
1047 | { |
1048 | struct drm_i915_private *dev_priv = dev->dev_private; |
1049 | int ret; |
1050 | |
1051 | /* Reset the chip */ |
1052 | |
1053 | /* GEN6_GDRST is not in the gt power well, no need to check |
1054 | * for fifo space for the write or forcewake the chip for |
1055 | * the read |
1056 | */ |
1057 | __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL); |
1058 | |
1059 | /* Spin waiting for the device to ack the reset request */ |
1060 | ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); |
1061 | |
1062 | intel_uncore_forcewake_reset(dev, true); |
1063 | |
1064 | return ret; |
1065 | } |
1066 | |
1067 | int intel_gpu_reset(struct drm_device *dev) |
1068 | { |
1069 | switch (INTEL_INFO(dev)->gen) { |
1070 | case 8: |
1071 | case 7: |
1072 | case 6: return gen6_do_reset(dev); |
1073 | case 5: return ironlake_do_reset(dev); |
1074 | case 4: return i965_do_reset(dev); |
1075 | default: return -ENODEV; |
1076 | } |
1077 | } |
1078 | |
1079 | void intel_uncore_check_errors(struct drm_device *dev) |
1080 | { |
1081 | struct drm_i915_private *dev_priv = dev->dev_private; |
1082 | |
1083 | if (HAS_FPGA_DBG_UNCLAIMED(dev) && |
1084 | (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { |
1085 | DRM_ERROR("Unclaimed register before interrupt\n" ); |
1086 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
1087 | } |
1088 | } |
1089 | |