1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | #include <linux/seq_file.h> |
29 | #include <linux/slab.h> |
30 | #include <drm/drmP.h> |
31 | #include <drm/drm.h> |
32 | #include <drm/drm_crtc_helper.h> |
33 | #include "radeon_reg.h" |
34 | #include "radeon.h" |
35 | #include "radeon_asic.h" |
36 | #include <drm/radeon_drm.h> |
37 | #include "r100_track.h" |
38 | #include "r300d.h" |
39 | #include "rv350d.h" |
40 | #include "r300_reg_safe.h" |
41 | |
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */
51 | |
52 | /* |
53 | * rv370,rv380 PCIE GART |
54 | */ |
55 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
56 | |
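/*
 * Invalidate the PCIE GART TLB by toggling the INVALIDATE_TLB bit in
 * PCIE_TX_GART_CNTL. The sequence is run twice to work around a HW bug,
 * and the read-back after each write makes sure the posted write has
 * reached the register before the bit is cleared again.
 */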
57 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) |
58 | { |
59 | uint32_t tmp; |
60 | int i; |
61 | |
	/* Workaround for a HW bug: do the flush twice */
63 | for (i = 0; i < 2; i++) { |
64 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
65 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); |
66 | (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
67 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
68 | } |
69 | mb(); |
70 | } |
71 | |
72 | #define R300_PTE_WRITEABLE (1 << 2) |
73 | #define R300_PTE_READABLE (1 << 3) |
74 | |
75 | #ifdef __NetBSD__ |
76 | /* |
77 | * XXX Can't use bus_space here because this is all mapped through the |
78 | * radeon_bo abstraction. Can't assume we're x86 because this is |
79 | * AMD/ATI Radeon, not Intel. |
80 | */ |
81 | |
82 | # define __iomem volatile |
83 | # define writel fake_writel |
84 | |
85 | static inline void |
86 | fake_writel(uint32_t v, void __iomem *ptr) |
87 | { |
88 | |
89 | membar_producer(); |
90 | *(uint32_t __iomem *)ptr = v; |
91 | } |
92 | #endif |
93 | |
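/*
 * Write a single GART page table entry. The 40-bit bus address is packed
 * into a 32-bit PTE: address bits 39:32 go to PTE bits 31:24 and address
 * bits 31:8 to PTE bits 23:0. GPU pages are at least 4KB aligned, so the
 * low bits of the shifted address are zero and are reused for the
 * readable/writeable permission flags.
 */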
94 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
95 | { |
96 | void __iomem *ptr = rdev->gart.ptr; |
97 | |
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
99 | return -EINVAL; |
100 | } |
101 | addr = (lower_32_bits(addr) >> 8) | |
102 | ((upper_32_bits(addr) & 0xff) << 24) | |
103 | R300_PTE_WRITEABLE | R300_PTE_READABLE; |
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it will get swapped on the way into VRAM, so there is
	 * no need for cpu_to_le32 on VRAM tables. */
107 | writel(addr, (uint8_t __iomem *)ptr + (i * 4)); |
108 | return 0; |
109 | } |
110 | |
111 | #ifdef __NetBSD__ |
112 | # undef __iomem |
113 | # undef writel |
114 | #endif |
115 | |
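/*
 * One-time PCIE GART setup: initialize the common GART structure, register
 * the debugfs info file, compute the page table size (4 bytes per page) and
 * hook up the rv370 TLB flush / set_page callbacks before allocating the
 * table in VRAM.
 */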
116 | int rv370_pcie_gart_init(struct radeon_device *rdev) |
117 | { |
118 | int r; |
119 | |
120 | if (rdev->gart.robj) { |
		WARN(1, "RV370 PCIE GART already initialized\n");
122 | return 0; |
123 | } |
124 | /* Initialize common gart structure */ |
125 | r = radeon_gart_init(rdev); |
126 | if (r) |
127 | return r; |
128 | r = rv370_debugfs_pcie_gart_info_init(rdev); |
129 | if (r) |
130 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n" ); |
131 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
132 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
133 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
134 | return radeon_gart_table_vram_alloc(rdev); |
135 | } |
136 | |
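/*
 * Enable the PCIE GART: pin the page table in VRAM, restore the page table
 * entries, program the GART aperture range and table base, point discarded
 * (unmapped) accesses at the start of VRAM, clear any stale error, then
 * turn the GART on and flush the TLB.
 */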
137 | int rv370_pcie_gart_enable(struct radeon_device *rdev) |
138 | { |
139 | uint32_t table_addr; |
140 | uint32_t tmp; |
141 | int r; |
142 | |
143 | if (rdev->gart.robj == NULL) { |
144 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n" ); |
145 | return -EINVAL; |
146 | } |
147 | r = radeon_gart_table_vram_pin(rdev); |
148 | if (r) |
149 | return r; |
150 | radeon_gart_restore(rdev); |
151 | /* discard memory request outside of configured range */ |
152 | tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
153 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
154 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start); |
155 | tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK; |
156 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); |
157 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); |
158 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); |
159 | table_addr = rdev->gart.table_addr; |
160 | WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); |
161 | /* FIXME: setup default page */ |
162 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); |
163 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); |
164 | /* Clear error */ |
165 | WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0); |
166 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
167 | tmp |= RADEON_PCIE_TX_GART_EN; |
168 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
169 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
170 | rv370_pcie_gart_tlb_flush(rdev); |
171 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n" , |
172 | (unsigned)(rdev->mc.gtt_size >> 20), |
173 | (unsigned long long)table_addr); |
174 | rdev->gart.ready = true; |
175 | return 0; |
176 | } |
177 | |
178 | void rv370_pcie_gart_disable(struct radeon_device *rdev) |
179 | { |
180 | u32 tmp; |
181 | |
182 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); |
183 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); |
184 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); |
185 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); |
186 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
187 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
188 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); |
189 | radeon_gart_table_vram_unpin(rdev); |
190 | } |
191 | |
192 | void rv370_pcie_gart_fini(struct radeon_device *rdev) |
193 | { |
194 | radeon_gart_fini(rdev); |
195 | rv370_pcie_gart_disable(rdev); |
196 | radeon_gart_table_vram_free(rdev); |
197 | } |
198 | |
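/*
 * Emit a fence on the CP ring. The sequence idles the SC/US blocks,
 * flushes the 3D destination and Z caches, waits for the engine to go
 * idle and clean, flushes the HDP read cache through the ring (see the
 * HOST_PATH_CNTL erratum above), and finally writes the fence sequence
 * number to the scratch register and fires a software interrupt.
 */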
199 | void r300_fence_ring_emit(struct radeon_device *rdev, |
200 | struct radeon_fence *fence) |
201 | { |
202 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
203 | |
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
206 | /* Write SC register so SC & US assert idle */ |
207 | radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0)); |
208 | radeon_ring_write(ring, 0); |
209 | radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0)); |
210 | radeon_ring_write(ring, 0); |
211 | /* Flush 3D cache */ |
212 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
213 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH); |
214 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
215 | radeon_ring_write(ring, R300_ZC_FLUSH); |
216 | /* Wait until IDLE & CLEAN */ |
217 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
218 | radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN | |
219 | RADEON_WAIT_2D_IDLECLEAN | |
220 | RADEON_WAIT_DMA_GUI_IDLE)); |
221 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
222 | radeon_ring_write(ring, rdev->config.r300.hdp_cntl | |
223 | RADEON_HDP_READ_BUFFER_INVALIDATE); |
224 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
225 | radeon_ring_write(ring, rdev->config.r300.hdp_cntl); |
226 | /* Emit fence sequence & fire IRQ */ |
227 | radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); |
228 | radeon_ring_write(ring, fence->seq); |
229 | radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); |
230 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); |
231 | } |
232 | |
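/*
 * Write the initial graphics state to a freshly started CP ring: ISYNC
 * control, GB_TILE_CONFIG for the detected pipe count, pipe auto-config,
 * cache flushes, and the default multisample positions, polygon mode and
 * rounding modes.
 */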
233 | void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) |
234 | { |
235 | unsigned gb_tile_config; |
236 | int r; |
237 | |
	/* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
239 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); |
	switch (rdev->num_gb_pipes) {
241 | case 2: |
242 | gb_tile_config |= R300_PIPE_COUNT_R300; |
243 | break; |
244 | case 3: |
245 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; |
246 | break; |
247 | case 4: |
248 | gb_tile_config |= R300_PIPE_COUNT_R420; |
249 | break; |
250 | case 1: |
251 | default: |
252 | gb_tile_config |= R300_PIPE_COUNT_RV350; |
253 | break; |
254 | } |
255 | |
256 | r = radeon_ring_lock(rdev, ring, 64); |
257 | if (r) { |
258 | return; |
259 | } |
260 | radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); |
261 | radeon_ring_write(ring, |
262 | RADEON_ISYNC_ANY2D_IDLE3D | |
263 | RADEON_ISYNC_ANY3D_IDLE2D | |
264 | RADEON_ISYNC_WAIT_IDLEGUI | |
265 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
266 | radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0)); |
267 | radeon_ring_write(ring, gb_tile_config); |
268 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
269 | radeon_ring_write(ring, |
270 | RADEON_WAIT_2D_IDLECLEAN | |
271 | RADEON_WAIT_3D_IDLECLEAN); |
272 | radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
273 | radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG); |
274 | radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0)); |
275 | radeon_ring_write(ring, 0); |
276 | radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0)); |
277 | radeon_ring_write(ring, 0); |
278 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
279 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
280 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
281 | radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); |
282 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
283 | radeon_ring_write(ring, |
284 | RADEON_WAIT_2D_IDLECLEAN | |
285 | RADEON_WAIT_3D_IDLECLEAN); |
286 | radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0)); |
287 | radeon_ring_write(ring, 0); |
288 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
289 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
290 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
291 | radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); |
292 | radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0)); |
293 | radeon_ring_write(ring, |
294 | ((6 << R300_MS_X0_SHIFT) | |
295 | (6 << R300_MS_Y0_SHIFT) | |
296 | (6 << R300_MS_X1_SHIFT) | |
297 | (6 << R300_MS_Y1_SHIFT) | |
298 | (6 << R300_MS_X2_SHIFT) | |
299 | (6 << R300_MS_Y2_SHIFT) | |
300 | (6 << R300_MSBD0_Y_SHIFT) | |
301 | (6 << R300_MSBD0_X_SHIFT))); |
302 | radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0)); |
303 | radeon_ring_write(ring, |
304 | ((6 << R300_MS_X3_SHIFT) | |
305 | (6 << R300_MS_Y3_SHIFT) | |
306 | (6 << R300_MS_X4_SHIFT) | |
307 | (6 << R300_MS_Y4_SHIFT) | |
308 | (6 << R300_MS_X5_SHIFT) | |
309 | (6 << R300_MS_Y5_SHIFT) | |
310 | (6 << R300_MSBD1_SHIFT))); |
311 | radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0)); |
312 | radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); |
313 | radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0)); |
314 | radeon_ring_write(ring, |
315 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); |
316 | radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0)); |
317 | radeon_ring_write(ring, |
318 | R300_GEOMETRY_ROUND_NEAREST | |
319 | R300_COLOR_ROUND_NEAREST); |
320 | radeon_ring_unlock_commit(rdev, ring); |
321 | } |
322 | |
323 | static void r300_errata(struct radeon_device *rdev) |
324 | { |
325 | rdev->pll_errata = 0; |
326 | |
327 | if (rdev->family == CHIP_R300 && |
328 | (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) { |
329 | rdev->pll_errata |= CHIP_ERRATA_R300_CG; |
330 | } |
331 | } |
332 | |
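/*
 * Poll MC_STATUS until the memory controller reports idle. Returns 0 on
 * idle and -1 if rdev->usec_timeout microseconds elapse first.
 */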
333 | int r300_mc_wait_for_idle(struct radeon_device *rdev) |
334 | { |
335 | unsigned i; |
336 | uint32_t tmp; |
337 | |
338 | for (i = 0; i < rdev->usec_timeout; i++) { |
339 | /* read MC_STATUS */ |
340 | tmp = RREG32(RADEON_MC_STATUS); |
341 | if (tmp & R300_MC_IDLE) { |
342 | return 0; |
343 | } |
344 | DRM_UDELAY(1); |
345 | } |
346 | return -1; |
347 | } |
348 | |
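/*
 * Derive the number of GB (quad) pipes from the family and PCI device ID,
 * program GB_TILE_CONFIG and DST_PIPE_CONFIG to match, and wait for the
 * GUI and MC to go idle again.
 */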
349 | static void r300_gpu_init(struct radeon_device *rdev) |
350 | { |
351 | uint32_t gb_tile_config, tmp; |
352 | |
353 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || |
354 | (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { |
355 | /* r300,r350 */ |
356 | rdev->num_gb_pipes = 2; |
357 | } else { |
358 | /* rv350,rv370,rv380,r300 AD, r350 AH */ |
359 | rdev->num_gb_pipes = 1; |
360 | } |
361 | rdev->num_z_pipes = 1; |
362 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); |
363 | switch (rdev->num_gb_pipes) { |
364 | case 2: |
365 | gb_tile_config |= R300_PIPE_COUNT_R300; |
366 | break; |
367 | case 3: |
368 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; |
369 | break; |
370 | case 4: |
371 | gb_tile_config |= R300_PIPE_COUNT_R420; |
372 | break; |
373 | default: |
374 | case 1: |
375 | gb_tile_config |= R300_PIPE_COUNT_RV350; |
376 | break; |
377 | } |
378 | WREG32(R300_GB_TILE_CONFIG, gb_tile_config); |
379 | |
380 | if (r100_gui_wait_for_idle(rdev)) { |
381 | printk(KERN_WARNING "Failed to wait GUI idle while " |
382 | "programming pipes. Bad things might happen.\n" ); |
383 | } |
384 | |
385 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
386 | WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); |
387 | |
388 | WREG32(R300_RB2D_DSTCACHE_MODE, |
389 | R300_DC_AUTOFLUSH_ENABLE | |
390 | R300_DC_DC_DISABLE_IGNORE_PE); |
391 | |
392 | if (r100_gui_wait_for_idle(rdev)) { |
393 | printk(KERN_WARNING "Failed to wait GUI idle while " |
394 | "programming pipes. Bad things might happen.\n" ); |
395 | } |
396 | if (r300_mc_wait_for_idle(rdev)) { |
397 | printk(KERN_WARNING "Failed to wait MC idle while " |
398 | "programming pipes. Bad things might happen.\n" ); |
399 | } |
400 | DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n" , |
401 | rdev->num_gb_pipes, rdev->num_z_pipes); |
402 | } |
403 | |
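/*
 * Soft reset the GPU if the GUI block is busy: stop the CP, save PCI
 * state and disable bus mastering, pulse the VAP/GA soft reset bits and
 * then the CP soft reset bit in RBBM_SOFT_RESET, and restore PCI state.
 * RBBM_STATUS is logged at each step to help diagnose a failed reset.
 */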
404 | int r300_asic_reset(struct radeon_device *rdev) |
405 | { |
406 | struct r100_mc_save save; |
407 | u32 status, tmp; |
408 | int ret = 0; |
409 | |
410 | status = RREG32(R_000E40_RBBM_STATUS); |
411 | if (!G_000E40_GUI_ACTIVE(status)) { |
412 | return 0; |
413 | } |
414 | r100_mc_stop(rdev, &save); |
415 | status = RREG32(R_000E40_RBBM_STATUS); |
416 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n" , __func__, __LINE__, status); |
417 | /* stop CP */ |
418 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
419 | tmp = RREG32(RADEON_CP_RB_CNTL); |
420 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); |
421 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
422 | WREG32(RADEON_CP_RB_WPTR, 0); |
423 | WREG32(RADEON_CP_RB_CNTL, tmp); |
424 | /* save PCI state */ |
425 | pci_save_state(rdev->pdev); |
426 | /* disable bus mastering */ |
427 | r100_bm_disable(rdev); |
428 | WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | |
429 | S_0000F0_SOFT_RESET_GA(1)); |
430 | RREG32(R_0000F0_RBBM_SOFT_RESET); |
431 | mdelay(500); |
432 | WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
433 | mdelay(1); |
434 | status = RREG32(R_000E40_RBBM_STATUS); |
435 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n" , __func__, __LINE__, status); |
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard-locking the computer, but it's necessary for a successful
	 * reset. More testing & experimenting is needed on R3XX/R4XX to
	 * find a reliable solution (if any).
	 */
441 | WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); |
442 | RREG32(R_0000F0_RBBM_SOFT_RESET); |
443 | mdelay(500); |
444 | WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
445 | mdelay(1); |
446 | status = RREG32(R_000E40_RBBM_STATUS); |
447 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n" , __func__, __LINE__, status); |
448 | /* restore PCI & busmastering */ |
449 | pci_restore_state(rdev->pdev); |
450 | r100_enable_bm(rdev); |
451 | /* Check if GPU is idle */ |
452 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { |
453 | dev_err(rdev->dev, "failed to reset GPU\n" ); |
454 | ret = -1; |
455 | } else |
456 | dev_info(rdev->dev, "GPU reset succeed\n" ); |
457 | r100_mc_resume(rdev, &save); |
458 | return ret; |
459 | } |
460 | |
461 | /* |
462 | * r300,r350,rv350,rv380 VRAM info |
463 | */ |
464 | void r300_mc_init(struct radeon_device *rdev) |
465 | { |
466 | u64 base; |
467 | u32 tmp; |
468 | |
	/* DDR for all cards after R300 & IGP */
470 | rdev->mc.vram_is_ddr = true; |
471 | tmp = RREG32(RADEON_MEM_CNTL); |
472 | tmp &= R300_MEM_NUM_CHANNELS_MASK; |
473 | switch (tmp) { |
474 | case 0: rdev->mc.vram_width = 64; break; |
475 | case 1: rdev->mc.vram_width = 128; break; |
476 | case 2: rdev->mc.vram_width = 256; break; |
477 | default: rdev->mc.vram_width = 128; break; |
478 | } |
479 | r100_vram_init_sizes(rdev); |
480 | base = rdev->mc.aper_base; |
481 | if (rdev->flags & RADEON_IS_IGP) |
482 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
483 | radeon_vram_location(rdev, &rdev->mc, base); |
484 | rdev->mc.gtt_base_align = 0; |
485 | if (!(rdev->flags & RADEON_IS_AGP)) |
486 | radeon_gtt_location(rdev, &rdev->mc); |
487 | radeon_update_bandwidth_info(rdev); |
488 | } |
489 | |
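/*
 * Reconfigure the PCIE link width. The requested lane count is mapped to
 * an LC_LINK_WIDTH value; if the link is already at that width nothing is
 * done, otherwise LC_RECONFIG_NOW is triggered and we spin until the
 * register reads back a valid value.
 */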
490 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
491 | { |
492 | uint32_t link_width_cntl, mask; |
493 | |
494 | if (rdev->flags & RADEON_IS_IGP) |
495 | return; |
496 | |
497 | if (!(rdev->flags & RADEON_IS_PCIE)) |
498 | return; |
499 | |
500 | /* FIXME wait for idle */ |
501 | |
502 | switch (lanes) { |
503 | case 0: |
504 | mask = RADEON_PCIE_LC_LINK_WIDTH_X0; |
505 | break; |
506 | case 1: |
507 | mask = RADEON_PCIE_LC_LINK_WIDTH_X1; |
508 | break; |
509 | case 2: |
510 | mask = RADEON_PCIE_LC_LINK_WIDTH_X2; |
511 | break; |
512 | case 4: |
513 | mask = RADEON_PCIE_LC_LINK_WIDTH_X4; |
514 | break; |
515 | case 8: |
516 | mask = RADEON_PCIE_LC_LINK_WIDTH_X8; |
517 | break; |
518 | case 12: |
519 | mask = RADEON_PCIE_LC_LINK_WIDTH_X12; |
520 | break; |
521 | case 16: |
522 | default: |
523 | mask = RADEON_PCIE_LC_LINK_WIDTH_X16; |
524 | break; |
525 | } |
526 | |
527 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
528 | |
529 | if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == |
530 | (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) |
531 | return; |
532 | |
533 | link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | |
534 | RADEON_PCIE_LC_RECONFIG_NOW | |
535 | RADEON_PCIE_LC_RECONFIG_LATER | |
536 | RADEON_PCIE_LC_SHORT_RECONFIG_EN); |
537 | link_width_cntl |= mask; |
538 | WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
539 | WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | |
540 | RADEON_PCIE_LC_RECONFIG_NOW)); |
541 | |
542 | /* wait for lane set to complete */ |
543 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
544 | while (link_width_cntl == 0xffffffff) |
545 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
547 | } |
548 | |
549 | int rv370_get_pcie_lanes(struct radeon_device *rdev) |
550 | { |
551 | u32 link_width_cntl; |
552 | |
553 | if (rdev->flags & RADEON_IS_IGP) |
554 | return 0; |
555 | |
556 | if (!(rdev->flags & RADEON_IS_PCIE)) |
557 | return 0; |
558 | |
559 | /* FIXME wait for idle */ |
560 | |
561 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
562 | |
563 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { |
564 | case RADEON_PCIE_LC_LINK_WIDTH_X0: |
565 | return 0; |
566 | case RADEON_PCIE_LC_LINK_WIDTH_X1: |
567 | return 1; |
568 | case RADEON_PCIE_LC_LINK_WIDTH_X2: |
569 | return 2; |
570 | case RADEON_PCIE_LC_LINK_WIDTH_X4: |
571 | return 4; |
572 | case RADEON_PCIE_LC_LINK_WIDTH_X8: |
573 | return 8; |
574 | case RADEON_PCIE_LC_LINK_WIDTH_X16: |
575 | default: |
576 | return 16; |
577 | } |
578 | } |
579 | |
580 | #if defined(CONFIG_DEBUG_FS) |
581 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) |
582 | { |
583 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
584 | struct drm_device *dev = node->minor->dev; |
585 | struct radeon_device *rdev = dev->dev_private; |
586 | uint32_t tmp; |
587 | |
588 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
589 | seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n" , tmp); |
590 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE); |
591 | seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n" , tmp); |
592 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO); |
593 | seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n" , tmp); |
594 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI); |
595 | seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n" , tmp); |
596 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO); |
597 | seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n" , tmp); |
598 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI); |
599 | seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n" , tmp); |
600 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR); |
601 | seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n" , tmp); |
602 | return 0; |
603 | } |
604 | |
605 | static struct drm_info_list rv370_pcie_gart_info_list[] = { |
606 | {"rv370_pcie_gart_info" , rv370_debugfs_pcie_gart_info, 0, NULL}, |
607 | }; |
608 | #endif |
609 | |
610 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
611 | { |
612 | #if defined(CONFIG_DEBUG_FS) |
613 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); |
614 | #else |
615 | return 0; |
616 | #endif |
617 | } |
618 | |
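/*
 * Validate a type-0 register write from the command stream. Registers
 * carrying buffer addresses (color, depth, texture, AA resolve) get a
 * relocation applied, state needed for buffer-size checking is mirrored
 * into the CS tracker, and HyperZ/CMASK registers are rejected unless the
 * submitting file owns those features.
 */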
619 | static int r300_packet0_check(struct radeon_cs_parser *p, |
620 | struct radeon_cs_packet *pkt, |
621 | unsigned idx, unsigned reg) |
622 | { |
623 | struct radeon_cs_reloc *reloc; |
624 | struct r100_cs_track *track; |
625 | volatile uint32_t *ib; |
626 | uint32_t tmp, tile_flags = 0; |
627 | unsigned i; |
628 | int r; |
629 | u32 idx_value; |
630 | |
631 | ib = p->ib.ptr; |
632 | track = (struct r100_cs_track *)p->track; |
633 | idx_value = radeon_get_ib_value(p, idx); |
634 | |
	switch (reg) {
636 | case AVIVO_D1MODE_VLINE_START_END: |
637 | case RADEON_CRTC_GUI_TRIG_VLINE: |
638 | r = r100_cs_packet_parse_vline(p); |
639 | if (r) { |
640 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
641 | idx, reg); |
642 | radeon_cs_dump_packet(p, pkt); |
643 | return r; |
644 | } |
645 | break; |
646 | case RADEON_DST_PITCH_OFFSET: |
647 | case RADEON_SRC_PITCH_OFFSET: |
648 | r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
649 | if (r) |
650 | return r; |
651 | break; |
652 | case R300_RB3D_COLOROFFSET0: |
653 | case R300_RB3D_COLOROFFSET1: |
654 | case R300_RB3D_COLOROFFSET2: |
655 | case R300_RB3D_COLOROFFSET3: |
656 | i = (reg - R300_RB3D_COLOROFFSET0) >> 2; |
657 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
658 | if (r) { |
659 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
660 | idx, reg); |
661 | radeon_cs_dump_packet(p, pkt); |
662 | return r; |
663 | } |
664 | track->cb[i].robj = reloc->robj; |
665 | track->cb[i].offset = idx_value; |
666 | track->cb_dirty = true; |
667 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
668 | break; |
669 | case R300_ZB_DEPTHOFFSET: |
670 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
671 | if (r) { |
672 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
673 | idx, reg); |
674 | radeon_cs_dump_packet(p, pkt); |
675 | return r; |
676 | } |
677 | track->zb.robj = reloc->robj; |
678 | track->zb.offset = idx_value; |
679 | track->zb_dirty = true; |
680 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
681 | break; |
682 | case R300_TX_OFFSET_0: |
683 | case R300_TX_OFFSET_0+4: |
684 | case R300_TX_OFFSET_0+8: |
685 | case R300_TX_OFFSET_0+12: |
686 | case R300_TX_OFFSET_0+16: |
687 | case R300_TX_OFFSET_0+20: |
688 | case R300_TX_OFFSET_0+24: |
689 | case R300_TX_OFFSET_0+28: |
690 | case R300_TX_OFFSET_0+32: |
691 | case R300_TX_OFFSET_0+36: |
692 | case R300_TX_OFFSET_0+40: |
693 | case R300_TX_OFFSET_0+44: |
694 | case R300_TX_OFFSET_0+48: |
695 | case R300_TX_OFFSET_0+52: |
696 | case R300_TX_OFFSET_0+56: |
697 | case R300_TX_OFFSET_0+60: |
698 | i = (reg - R300_TX_OFFSET_0) >> 2; |
699 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
700 | if (r) { |
701 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
702 | idx, reg); |
703 | radeon_cs_dump_packet(p, pkt); |
704 | return r; |
705 | } |
706 | |
707 | if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { |
708 | ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ |
709 | ((idx_value & ~31) + (u32)reloc->gpu_offset); |
710 | } else { |
711 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
712 | tile_flags |= R300_TXO_MACRO_TILE; |
713 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
714 | tile_flags |= R300_TXO_MICRO_TILE; |
715 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
716 | tile_flags |= R300_TXO_MICRO_TILE_SQUARE; |
717 | |
718 | tmp = idx_value + ((u32)reloc->gpu_offset); |
719 | tmp |= tile_flags; |
720 | ib[idx] = tmp; |
721 | } |
722 | track->textures[i].robj = reloc->robj; |
723 | track->tex_dirty = true; |
724 | break; |
725 | /* Tracked registers */ |
726 | case 0x2084: |
727 | /* VAP_VF_CNTL */ |
728 | track->vap_vf_cntl = idx_value; |
729 | break; |
730 | case 0x20B4: |
731 | /* VAP_VTX_SIZE */ |
732 | track->vtx_size = idx_value & 0x7F; |
733 | break; |
734 | case 0x2134: |
735 | /* VAP_VF_MAX_VTX_INDX */ |
736 | track->max_indx = idx_value & 0x00FFFFFFUL; |
737 | break; |
738 | case 0x2088: |
739 | /* VAP_ALT_NUM_VERTICES - only valid on r500 */ |
740 | if (p->rdev->family < CHIP_RV515) |
741 | goto fail; |
742 | track->vap_alt_nverts = idx_value & 0xFFFFFF; |
743 | break; |
744 | case 0x43E4: |
745 | /* SC_SCISSOR1 */ |
746 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
747 | if (p->rdev->family < CHIP_RV515) { |
748 | track->maxy -= 1440; |
749 | } |
750 | track->cb_dirty = true; |
751 | track->zb_dirty = true; |
752 | break; |
753 | case 0x4E00: |
754 | /* RB3D_CCTL */ |
755 | if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ |
756 | p->rdev->cmask_filp != p->filp) { |
757 | DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n" ); |
758 | return -EINVAL; |
759 | } |
760 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
761 | track->cb_dirty = true; |
762 | break; |
763 | case 0x4E38: |
764 | case 0x4E3C: |
765 | case 0x4E40: |
766 | case 0x4E44: |
767 | /* RB3D_COLORPITCH0 */ |
768 | /* RB3D_COLORPITCH1 */ |
769 | /* RB3D_COLORPITCH2 */ |
770 | /* RB3D_COLORPITCH3 */ |
771 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
772 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
773 | if (r) { |
774 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
775 | idx, reg); |
776 | radeon_cs_dump_packet(p, pkt); |
777 | return r; |
778 | } |
779 | |
780 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
781 | tile_flags |= R300_COLOR_TILE_ENABLE; |
782 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
783 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
784 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
785 | tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; |
786 | |
787 | tmp = idx_value & ~(0x7 << 16); |
788 | tmp |= tile_flags; |
789 | ib[idx] = tmp; |
790 | } |
791 | i = (reg - 0x4E38) >> 2; |
792 | track->cb[i].pitch = idx_value & 0x3FFE; |
793 | switch (((idx_value >> 21) & 0xF)) { |
794 | case 9: |
795 | case 11: |
796 | case 12: |
797 | track->cb[i].cpp = 1; |
798 | break; |
799 | case 3: |
800 | case 4: |
801 | case 13: |
802 | case 15: |
803 | track->cb[i].cpp = 2; |
804 | break; |
805 | case 5: |
806 | if (p->rdev->family < CHIP_RV515) { |
807 | DRM_ERROR("Invalid color buffer format (%d)!\n" , |
808 | ((idx_value >> 21) & 0xF)); |
809 | return -EINVAL; |
810 | } |
			/* Fall through. */
812 | case 6: |
813 | track->cb[i].cpp = 4; |
814 | break; |
815 | case 10: |
816 | track->cb[i].cpp = 8; |
817 | break; |
818 | case 7: |
819 | track->cb[i].cpp = 16; |
820 | break; |
821 | default: |
822 | DRM_ERROR("Invalid color buffer format (%d) !\n" , |
823 | ((idx_value >> 21) & 0xF)); |
824 | return -EINVAL; |
825 | } |
826 | track->cb_dirty = true; |
827 | break; |
828 | case 0x4F00: |
829 | /* ZB_CNTL */ |
830 | if (idx_value & 2) { |
831 | track->z_enabled = true; |
832 | } else { |
833 | track->z_enabled = false; |
834 | } |
835 | track->zb_dirty = true; |
836 | break; |
837 | case 0x4F10: |
838 | /* ZB_FORMAT */ |
839 | switch ((idx_value & 0xF)) { |
840 | case 0: |
841 | case 1: |
842 | track->zb.cpp = 2; |
843 | break; |
844 | case 2: |
845 | track->zb.cpp = 4; |
846 | break; |
847 | default: |
848 | DRM_ERROR("Invalid z buffer format (%d) !\n" , |
849 | (idx_value & 0xF)); |
850 | return -EINVAL; |
851 | } |
852 | track->zb_dirty = true; |
853 | break; |
854 | case 0x4F24: |
855 | /* ZB_DEPTHPITCH */ |
856 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
857 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
858 | if (r) { |
859 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
860 | idx, reg); |
861 | radeon_cs_dump_packet(p, pkt); |
862 | return r; |
863 | } |
864 | |
865 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
866 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; |
867 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
868 | tile_flags |= R300_DEPTHMICROTILE_TILED; |
869 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
870 | tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; |
871 | |
872 | tmp = idx_value & ~(0x7 << 16); |
873 | tmp |= tile_flags; |
874 | ib[idx] = tmp; |
875 | } |
876 | track->zb.pitch = idx_value & 0x3FFC; |
877 | track->zb_dirty = true; |
878 | break; |
879 | case 0x4104: |
880 | /* TX_ENABLE */ |
881 | for (i = 0; i < 16; i++) { |
882 | bool enabled; |
883 | |
884 | enabled = !!(idx_value & (1 << i)); |
885 | track->textures[i].enabled = enabled; |
886 | } |
887 | track->tex_dirty = true; |
888 | break; |
889 | case 0x44C0: |
890 | case 0x44C4: |
891 | case 0x44C8: |
892 | case 0x44CC: |
893 | case 0x44D0: |
894 | case 0x44D4: |
895 | case 0x44D8: |
896 | case 0x44DC: |
897 | case 0x44E0: |
898 | case 0x44E4: |
899 | case 0x44E8: |
900 | case 0x44EC: |
901 | case 0x44F0: |
902 | case 0x44F4: |
903 | case 0x44F8: |
904 | case 0x44FC: |
905 | /* TX_FORMAT1_[0-15] */ |
906 | i = (reg - 0x44C0) >> 2; |
907 | tmp = (idx_value >> 25) & 0x3; |
908 | track->textures[i].tex_coord_type = tmp; |
909 | switch ((idx_value & 0x1F)) { |
910 | case R300_TX_FORMAT_X8: |
911 | case R300_TX_FORMAT_Y4X4: |
912 | case R300_TX_FORMAT_Z3Y3X2: |
913 | track->textures[i].cpp = 1; |
914 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
915 | break; |
916 | case R300_TX_FORMAT_X16: |
917 | case R300_TX_FORMAT_FL_I16: |
918 | case R300_TX_FORMAT_Y8X8: |
919 | case R300_TX_FORMAT_Z5Y6X5: |
920 | case R300_TX_FORMAT_Z6Y5X5: |
921 | case R300_TX_FORMAT_W4Z4Y4X4: |
922 | case R300_TX_FORMAT_W1Z5Y5X5: |
923 | case R300_TX_FORMAT_D3DMFT_CxV8U8: |
924 | case R300_TX_FORMAT_B8G8_B8G8: |
925 | case R300_TX_FORMAT_G8R8_G8B8: |
926 | track->textures[i].cpp = 2; |
927 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
928 | break; |
929 | case R300_TX_FORMAT_Y16X16: |
930 | case R300_TX_FORMAT_FL_I16A16: |
931 | case R300_TX_FORMAT_Z11Y11X10: |
932 | case R300_TX_FORMAT_Z10Y11X11: |
933 | case R300_TX_FORMAT_W8Z8Y8X8: |
934 | case R300_TX_FORMAT_W2Z10Y10X10: |
935 | case 0x17: |
936 | case R300_TX_FORMAT_FL_I32: |
937 | case 0x1e: |
938 | track->textures[i].cpp = 4; |
939 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
940 | break; |
941 | case R300_TX_FORMAT_W16Z16Y16X16: |
942 | case R300_TX_FORMAT_FL_R16G16B16A16: |
943 | case R300_TX_FORMAT_FL_I32A32: |
944 | track->textures[i].cpp = 8; |
945 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
946 | break; |
947 | case R300_TX_FORMAT_FL_R32G32B32A32: |
948 | track->textures[i].cpp = 16; |
949 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
950 | break; |
951 | case R300_TX_FORMAT_DXT1: |
952 | track->textures[i].cpp = 1; |
953 | track->textures[i].compress_format = R100_TRACK_COMP_DXT1; |
954 | break; |
955 | case R300_TX_FORMAT_ATI2N: |
956 | if (p->rdev->family < CHIP_R420) { |
957 | DRM_ERROR("Invalid texture format %u\n" , |
958 | (idx_value & 0x1F)); |
959 | return -EINVAL; |
960 | } |
961 | /* The same rules apply as for DXT3/5. */ |
			/* Fall through. */
963 | case R300_TX_FORMAT_DXT3: |
964 | case R300_TX_FORMAT_DXT5: |
965 | track->textures[i].cpp = 1; |
966 | track->textures[i].compress_format = R100_TRACK_COMP_DXT35; |
967 | break; |
968 | default: |
969 | DRM_ERROR("Invalid texture format %u\n" , |
970 | (idx_value & 0x1F)); |
971 | return -EINVAL; |
972 | } |
973 | track->tex_dirty = true; |
974 | break; |
975 | case 0x4400: |
976 | case 0x4404: |
977 | case 0x4408: |
978 | case 0x440C: |
979 | case 0x4410: |
980 | case 0x4414: |
981 | case 0x4418: |
982 | case 0x441C: |
983 | case 0x4420: |
984 | case 0x4424: |
985 | case 0x4428: |
986 | case 0x442C: |
987 | case 0x4430: |
988 | case 0x4434: |
989 | case 0x4438: |
990 | case 0x443C: |
991 | /* TX_FILTER0_[0-15] */ |
992 | i = (reg - 0x4400) >> 2; |
993 | tmp = idx_value & 0x7; |
994 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
995 | track->textures[i].roundup_w = false; |
996 | } |
997 | tmp = (idx_value >> 3) & 0x7; |
998 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
999 | track->textures[i].roundup_h = false; |
1000 | } |
1001 | track->tex_dirty = true; |
1002 | break; |
1003 | case 0x4500: |
1004 | case 0x4504: |
1005 | case 0x4508: |
1006 | case 0x450C: |
1007 | case 0x4510: |
1008 | case 0x4514: |
1009 | case 0x4518: |
1010 | case 0x451C: |
1011 | case 0x4520: |
1012 | case 0x4524: |
1013 | case 0x4528: |
1014 | case 0x452C: |
1015 | case 0x4530: |
1016 | case 0x4534: |
1017 | case 0x4538: |
1018 | case 0x453C: |
1019 | /* TX_FORMAT2_[0-15] */ |
1020 | i = (reg - 0x4500) >> 2; |
1021 | tmp = idx_value & 0x3FFF; |
1022 | track->textures[i].pitch = tmp + 1; |
1023 | if (p->rdev->family >= CHIP_RV515) { |
1024 | tmp = ((idx_value >> 15) & 1) << 11; |
1025 | track->textures[i].width_11 = tmp; |
1026 | tmp = ((idx_value >> 16) & 1) << 11; |
1027 | track->textures[i].height_11 = tmp; |
1028 | |
1029 | /* ATI1N */ |
1030 | if (idx_value & (1 << 14)) { |
1031 | /* The same rules apply as for DXT1. */ |
1032 | track->textures[i].compress_format = |
1033 | R100_TRACK_COMP_DXT1; |
1034 | } |
1035 | } else if (idx_value & (1 << 14)) { |
1036 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n" ); |
1037 | return -EINVAL; |
1038 | } |
1039 | track->tex_dirty = true; |
1040 | break; |
1041 | case 0x4480: |
1042 | case 0x4484: |
1043 | case 0x4488: |
1044 | case 0x448C: |
1045 | case 0x4490: |
1046 | case 0x4494: |
1047 | case 0x4498: |
1048 | case 0x449C: |
1049 | case 0x44A0: |
1050 | case 0x44A4: |
1051 | case 0x44A8: |
1052 | case 0x44AC: |
1053 | case 0x44B0: |
1054 | case 0x44B4: |
1055 | case 0x44B8: |
1056 | case 0x44BC: |
1057 | /* TX_FORMAT0_[0-15] */ |
1058 | i = (reg - 0x4480) >> 2; |
1059 | tmp = idx_value & 0x7FF; |
1060 | track->textures[i].width = tmp + 1; |
1061 | tmp = (idx_value >> 11) & 0x7FF; |
1062 | track->textures[i].height = tmp + 1; |
1063 | tmp = (idx_value >> 26) & 0xF; |
1064 | track->textures[i].num_levels = tmp; |
1065 | tmp = idx_value & (1 << 31); |
1066 | track->textures[i].use_pitch = !!tmp; |
1067 | tmp = (idx_value >> 22) & 0xF; |
1068 | track->textures[i].txdepth = tmp; |
1069 | track->tex_dirty = true; |
1070 | break; |
1071 | case R300_ZB_ZPASS_ADDR: |
1072 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1073 | if (r) { |
1074 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1075 | idx, reg); |
1076 | radeon_cs_dump_packet(p, pkt); |
1077 | return r; |
1078 | } |
1079 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1080 | break; |
1081 | case 0x4e0c: |
1082 | /* RB3D_COLOR_CHANNEL_MASK */ |
1083 | track->color_channel_mask = idx_value; |
1084 | track->cb_dirty = true; |
1085 | break; |
1086 | case 0x43a4: |
1087 | /* SC_HYPERZ_EN */ |
		/* The r300c driver emits this register - we need to disable
		 * hyperz for it without complaining */
1090 | if (p->rdev->hyperz_filp != p->filp) { |
1091 | if (idx_value & 0x1) |
1092 | ib[idx] = idx_value & ~1; |
1093 | } |
1094 | break; |
1095 | case 0x4f1c: |
1096 | /* ZB_BW_CNTL */ |
1097 | track->zb_cb_clear = !!(idx_value & (1 << 5)); |
1098 | track->cb_dirty = true; |
1099 | track->zb_dirty = true; |
1100 | if (p->rdev->hyperz_filp != p->filp) { |
1101 | if (idx_value & (R300_HIZ_ENABLE | |
1102 | R300_RD_COMP_ENABLE | |
1103 | R300_WR_COMP_ENABLE | |
1104 | R300_FAST_FILL_ENABLE)) |
1105 | goto fail; |
1106 | } |
1107 | break; |
1108 | case 0x4e04: |
1109 | /* RB3D_BLENDCNTL */ |
1110 | track->blend_read_enable = !!(idx_value & (1 << 2)); |
1111 | track->cb_dirty = true; |
1112 | break; |
1113 | case R300_RB3D_AARESOLVE_OFFSET: |
1114 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1115 | if (r) { |
1116 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1117 | idx, reg); |
1118 | radeon_cs_dump_packet(p, pkt); |
1119 | return r; |
1120 | } |
1121 | track->aa.robj = reloc->robj; |
1122 | track->aa.offset = idx_value; |
1123 | track->aa_dirty = true; |
1124 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1125 | break; |
1126 | case R300_RB3D_AARESOLVE_PITCH: |
1127 | track->aa.pitch = idx_value & 0x3FFE; |
1128 | track->aa_dirty = true; |
1129 | break; |
1130 | case R300_RB3D_AARESOLVE_CTL: |
1131 | track->aaresolve = idx_value & 0x1; |
1132 | track->aa_dirty = true; |
1133 | break; |
1134 | case 0x4f30: /* ZB_MASK_OFFSET */ |
1135 | case 0x4f34: /* ZB_ZMASK_PITCH */ |
1136 | case 0x4f44: /* ZB_HIZ_OFFSET */ |
1137 | case 0x4f54: /* ZB_HIZ_PITCH */ |
1138 | if (idx_value && (p->rdev->hyperz_filp != p->filp)) |
1139 | goto fail; |
1140 | break; |
1141 | case 0x4028: |
1142 | if (idx_value && (p->rdev->hyperz_filp != p->filp)) |
1143 | goto fail; |
1144 | /* GB_Z_PEQ_CONFIG */ |
1145 | if (p->rdev->family >= CHIP_RV350) |
1146 | break; |
1147 | goto fail; |
1149 | case 0x4be8: |
1150 | /* valid register only on RV530 */ |
1151 | if (p->rdev->family == CHIP_RV530) |
1152 | break; |
1153 | /* fallthrough do not move */ |
1154 | default: |
1155 | goto fail; |
1156 | } |
1157 | return 0; |
1158 | fail: |
1159 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n" , |
1160 | reg, idx, idx_value); |
1161 | return -EINVAL; |
1162 | } |
1163 | |
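/*
 * Validate a type-3 packet. Vertex array and index buffer packets get
 * their relocations applied; draw packets record VAP_VF_CNTL and the
 * immediate dword count in the tracker and run a full track check before
 * the draw is accepted.
 */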
1164 | static int r300_packet3_check(struct radeon_cs_parser *p, |
1165 | struct radeon_cs_packet *pkt) |
1166 | { |
1167 | struct radeon_cs_reloc *reloc; |
1168 | struct r100_cs_track *track; |
1169 | volatile uint32_t *ib; |
1170 | unsigned idx; |
1171 | int r; |
1172 | |
1173 | ib = p->ib.ptr; |
1174 | idx = pkt->idx + 1; |
1175 | track = (struct r100_cs_track *)p->track; |
	switch (pkt->opcode) {
1177 | case PACKET3_3D_LOAD_VBPNTR: |
1178 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1179 | if (r) |
1180 | return r; |
1181 | break; |
1182 | case PACKET3_INDX_BUFFER: |
1183 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1184 | if (r) { |
1185 | DRM_ERROR("No reloc for packet3 %d\n" , pkt->opcode); |
1186 | radeon_cs_dump_packet(p, pkt); |
1187 | return r; |
1188 | } |
1189 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); |
1190 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1191 | if (r) { |
1192 | return r; |
1193 | } |
1194 | break; |
1195 | /* Draw packet */ |
1196 | case PACKET3_3D_DRAW_IMMD: |
		/* Number of dwords is vtx_size * (num_vertices - 1).
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream. */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1202 | return -EINVAL; |
1203 | } |
1204 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1205 | track->immd_dwords = pkt->count - 1; |
1206 | r = r100_cs_track_check(p->rdev, track); |
1207 | if (r) { |
1208 | return r; |
1209 | } |
1210 | break; |
1211 | case PACKET3_3D_DRAW_IMMD_2: |
		/* Number of dwords is vtx_size * (num_vertices - 1).
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream. */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1217 | return -EINVAL; |
1218 | } |
1219 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1220 | track->immd_dwords = pkt->count; |
1221 | r = r100_cs_track_check(p->rdev, track); |
1222 | if (r) { |
1223 | return r; |
1224 | } |
1225 | break; |
1226 | case PACKET3_3D_DRAW_VBUF: |
1227 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1228 | r = r100_cs_track_check(p->rdev, track); |
1229 | if (r) { |
1230 | return r; |
1231 | } |
1232 | break; |
1233 | case PACKET3_3D_DRAW_VBUF_2: |
1234 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1235 | r = r100_cs_track_check(p->rdev, track); |
1236 | if (r) { |
1237 | return r; |
1238 | } |
1239 | break; |
1240 | case PACKET3_3D_DRAW_INDX: |
1241 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1242 | r = r100_cs_track_check(p->rdev, track); |
1243 | if (r) { |
1244 | return r; |
1245 | } |
1246 | break; |
1247 | case PACKET3_3D_DRAW_INDX_2: |
1248 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1249 | r = r100_cs_track_check(p->rdev, track); |
1250 | if (r) { |
1251 | return r; |
1252 | } |
1253 | break; |
1254 | case PACKET3_3D_CLEAR_HIZ: |
1255 | case PACKET3_3D_CLEAR_ZMASK: |
1256 | if (p->rdev->hyperz_filp != p->filp) |
1257 | return -EINVAL; |
1258 | break; |
1259 | case PACKET3_3D_CLEAR_CMASK: |
1260 | if (p->rdev->cmask_filp != p->filp) |
1261 | return -EINVAL; |
1262 | break; |
1263 | case PACKET3_NOP: |
1264 | break; |
1265 | default: |
1266 | DRM_ERROR("Packet3 opcode %x not supported\n" , pkt->opcode); |
1267 | return -EINVAL; |
1268 | } |
1269 | return 0; |
1270 | } |
1271 | |
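/*
 * Top-level command stream parser for r300-class chips: walk the IB one
 * packet at a time, checking type-0 packets against the register safe
 * bitmap (with r300_packet0_check for registers that need manual
 * handling) and type-3 packets with r300_packet3_check.
 */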
1272 | int r300_cs_parse(struct radeon_cs_parser *p) |
1273 | { |
1274 | struct radeon_cs_packet pkt; |
1275 | struct r100_cs_track *track; |
1276 | int r; |
1277 | |
1278 | track = kzalloc(sizeof(*track), GFP_KERNEL); |
1279 | if (track == NULL) |
1280 | return -ENOMEM; |
1281 | r100_cs_track_clear(p->rdev, track); |
1282 | p->track = track; |
1283 | do { |
1284 | r = radeon_cs_packet_parse(p, &pkt, p->idx); |
1285 | if (r) { |
1286 | return r; |
1287 | } |
1288 | p->idx += pkt.count + 2; |
1289 | switch (pkt.type) { |
1290 | case RADEON_PACKET_TYPE0: |
1291 | r = r100_cs_parse_packet0(p, &pkt, |
1292 | p->rdev->config.r300.reg_safe_bm, |
1293 | p->rdev->config.r300.reg_safe_bm_size, |
1294 | &r300_packet0_check); |
1295 | break; |
1296 | case RADEON_PACKET_TYPE2: |
1297 | break; |
1298 | case RADEON_PACKET_TYPE3: |
1299 | r = r300_packet3_check(p, &pkt); |
1300 | break; |
1301 | default: |
1302 | DRM_ERROR("Unknown packet type %d !\n" , pkt.type); |
1303 | return -EINVAL; |
1304 | } |
1305 | if (r) { |
1306 | return r; |
1307 | } |
1308 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
1309 | return 0; |
1310 | } |
1311 | |
1312 | void r300_set_reg_safe(struct radeon_device *rdev) |
1313 | { |
1314 | rdev->config.r300.reg_safe_bm = r300_reg_safe_bm; |
1315 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); |
1316 | } |
1317 | |
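/*
 * Program the memory controller's view of the address space: all MC
 * clients are stopped while the AGP aperture (if any) and the VRAM
 * aperture are programmed, then resumed.
 */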
1318 | void r300_mc_program(struct radeon_device *rdev) |
1319 | { |
1320 | struct r100_mc_save save; |
1321 | int r; |
1322 | |
1323 | r = r100_debugfs_mc_info_init(rdev); |
1324 | if (r) { |
1325 | dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n" ); |
1326 | } |
1327 | |
1328 | /* Stops all mc clients */ |
1329 | r100_mc_stop(rdev, &save); |
1330 | if (rdev->flags & RADEON_IS_AGP) { |
1331 | WREG32(R_00014C_MC_AGP_LOCATION, |
1332 | S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | |
1333 | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); |
1334 | WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); |
1335 | WREG32(R_00015C_AGP_BASE_2, |
1336 | upper_32_bits(rdev->mc.agp_base) & 0xff); |
1337 | } else { |
1338 | WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); |
1339 | WREG32(R_000170_AGP_BASE, 0); |
1340 | WREG32(R_00015C_AGP_BASE_2, 0); |
1341 | } |
1342 | /* Wait for mc idle */ |
1343 | if (r300_mc_wait_for_idle(rdev)) |
1344 | DRM_INFO("Failed to wait MC idle before programming MC.\n" ); |
	/* Program the MC; it should be a 32-bit limited address space */
1346 | WREG32(R_000148_MC_FB_LOCATION, |
1347 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | |
1348 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
1349 | r100_mc_resume(rdev, &save); |
1350 | } |
1351 | |
1352 | void r300_clock_startup(struct radeon_device *rdev) |
1353 | { |
1354 | u32 tmp; |
1355 | |
1356 | if (radeon_dynclks != -1 && radeon_dynclks) |
1357 | radeon_legacy_set_clock_gating(rdev, 1); |
	/* We need to force on some of the blocks */
1359 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); |
1360 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
1361 | if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) |
1362 | tmp |= S_00000D_FORCE_VAP(1); |
1363 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); |
1364 | } |
1365 | |
1366 | static int r300_startup(struct radeon_device *rdev) |
1367 | { |
1368 | int r; |
1369 | |
1370 | /* set common regs */ |
1371 | r100_set_common_regs(rdev); |
1372 | /* program mc */ |
1373 | r300_mc_program(rdev); |
1374 | /* Resume clock */ |
1375 | r300_clock_startup(rdev); |
1376 | /* Initialize GPU configuration (# pipes, ...) */ |
1377 | r300_gpu_init(rdev); |
1378 | /* Initialize GART (initialize after TTM so we can allocate |
1379 | * memory through TTM but finalize after TTM) */ |
1380 | if (rdev->flags & RADEON_IS_PCIE) { |
1381 | r = rv370_pcie_gart_enable(rdev); |
1382 | if (r) |
1383 | return r; |
1384 | } |
1385 | |
1386 | if (rdev->family == CHIP_R300 || |
1387 | rdev->family == CHIP_R350 || |
1388 | rdev->family == CHIP_RV350) |
1389 | r100_enable_bm(rdev); |
1390 | |
1391 | if (rdev->flags & RADEON_IS_PCI) { |
1392 | r = r100_pci_gart_enable(rdev); |
1393 | if (r) |
1394 | return r; |
1395 | } |
1396 | |
1397 | /* allocate wb buffer */ |
1398 | r = radeon_wb_init(rdev); |
1399 | if (r) |
1400 | return r; |
1401 | |
1402 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
1403 | if (r) { |
1404 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n" , r); |
1405 | return r; |
1406 | } |
1407 | |
1408 | /* Enable IRQ */ |
1409 | if (!rdev->irq.installed) { |
1410 | r = radeon_irq_kms_init(rdev); |
1411 | if (r) |
1412 | return r; |
1413 | } |
1414 | |
1415 | r100_irq_set(rdev); |
1416 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
1417 | /* 1M ring buffer */ |
1418 | r = r100_cp_init(rdev, 1024 * 1024); |
1419 | if (r) { |
1420 | dev_err(rdev->dev, "failed initializing CP (%d).\n" , r); |
1421 | return r; |
1422 | } |
1423 | |
1424 | r = radeon_ib_pool_init(rdev); |
1425 | if (r) { |
1426 | dev_err(rdev->dev, "IB initialization failed (%d).\n" , r); |
1427 | return r; |
1428 | } |
1429 | |
1430 | return 0; |
1431 | } |
1432 | |
1433 | int r300_resume(struct radeon_device *rdev) |
1434 | { |
1435 | int r; |
1436 | |
	/* Make sure the GARTs are not active */
1438 | if (rdev->flags & RADEON_IS_PCIE) |
1439 | rv370_pcie_gart_disable(rdev); |
1440 | if (rdev->flags & RADEON_IS_PCI) |
1441 | r100_pci_gart_disable(rdev); |
1442 | /* Resume clock before doing reset */ |
1443 | r300_clock_startup(rdev); |
1444 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
1445 | if (radeon_asic_reset(rdev)) { |
1446 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n" , |
1447 | RREG32(R_000E40_RBBM_STATUS), |
1448 | RREG32(R_0007C0_CP_STAT)); |
1449 | } |
1450 | /* post */ |
1451 | radeon_combios_asic_init(rdev->ddev); |
1452 | /* Resume clock after posting */ |
1453 | r300_clock_startup(rdev); |
1454 | /* Initialize surface registers */ |
1455 | radeon_surface_init(rdev); |
1456 | |
1457 | rdev->accel_working = true; |
1458 | r = r300_startup(rdev); |
1459 | if (r) { |
1460 | rdev->accel_working = false; |
1461 | } |
1462 | return r; |
1463 | } |
1464 | |
1465 | int r300_suspend(struct radeon_device *rdev) |
1466 | { |
1467 | radeon_pm_suspend(rdev); |
1468 | r100_cp_disable(rdev); |
1469 | radeon_wb_disable(rdev); |
1470 | r100_irq_disable(rdev); |
1471 | if (rdev->flags & RADEON_IS_PCIE) |
1472 | rv370_pcie_gart_disable(rdev); |
1473 | if (rdev->flags & RADEON_IS_PCI) |
1474 | r100_pci_gart_disable(rdev); |
1475 | return 0; |
1476 | } |
1477 | |
1478 | void r300_fini(struct radeon_device *rdev) |
1479 | { |
1480 | radeon_pm_fini(rdev); |
1481 | r100_cp_fini(rdev); |
1482 | radeon_wb_fini(rdev); |
1483 | radeon_ib_pool_fini(rdev); |
1484 | radeon_gem_fini(rdev); |
1485 | if (rdev->flags & RADEON_IS_PCIE) |
1486 | rv370_pcie_gart_fini(rdev); |
1487 | if (rdev->flags & RADEON_IS_PCI) |
1488 | r100_pci_gart_fini(rdev); |
1489 | radeon_agp_fini(rdev); |
1490 | radeon_irq_kms_fini(rdev); |
1491 | radeon_fence_driver_fini(rdev); |
1492 | radeon_bo_fini(rdev); |
1493 | radeon_atombios_fini(rdev); |
1494 | kfree(rdev->bios); |
1495 | rdev->bios = NULL; |
1496 | } |
1497 | |
1498 | int r300_init(struct radeon_device *rdev) |
1499 | { |
1500 | int r; |
1501 | |
1502 | /* Disable VGA */ |
1503 | r100_vga_render_disable(rdev); |
1504 | /* Initialize scratch registers */ |
1505 | radeon_scratch_init(rdev); |
1506 | /* Initialize surface registers */ |
1507 | radeon_surface_init(rdev); |
	/* TODO: disabling VGA needs to use a VGA request */
	/* restore some registers to sane defaults */
1510 | r100_restore_sanity(rdev); |
	/* BIOS */
1512 | if (!radeon_get_bios(rdev)) { |
1513 | if (ASIC_IS_AVIVO(rdev)) |
1514 | return -EINVAL; |
1515 | } |
1516 | if (rdev->is_atom_bios) { |
1517 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n" ); |
1518 | return -EINVAL; |
1519 | } else { |
1520 | r = radeon_combios_init(rdev); |
1521 | if (r) |
1522 | return r; |
1523 | } |
1524 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
1525 | if (radeon_asic_reset(rdev)) { |
1526 | dev_warn(rdev->dev, |
1527 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n" , |
1528 | RREG32(R_000E40_RBBM_STATUS), |
1529 | RREG32(R_0007C0_CP_STAT)); |
1530 | } |
	/* check if the card is posted or not */
1532 | if (radeon_boot_test_post_card(rdev) == false) |
1533 | return -EINVAL; |
1534 | /* Set asic errata */ |
1535 | r300_errata(rdev); |
1536 | /* Initialize clocks */ |
1537 | radeon_get_clock_info(rdev->ddev); |
1538 | /* initialize AGP */ |
1539 | if (rdev->flags & RADEON_IS_AGP) { |
1540 | r = radeon_agp_init(rdev); |
1541 | if (r) { |
1542 | radeon_agp_disable(rdev); |
1543 | } |
1544 | } |
1545 | /* initialize memory controller */ |
1546 | r300_mc_init(rdev); |
1547 | /* Fence driver */ |
1548 | r = radeon_fence_driver_init(rdev); |
1549 | if (r) |
1550 | return r; |
1551 | /* Memory manager */ |
1552 | r = radeon_bo_init(rdev); |
1553 | if (r) |
1554 | return r; |
1555 | if (rdev->flags & RADEON_IS_PCIE) { |
1556 | r = rv370_pcie_gart_init(rdev); |
1557 | if (r) |
1558 | return r; |
1559 | } |
1560 | if (rdev->flags & RADEON_IS_PCI) { |
1561 | r = r100_pci_gart_init(rdev); |
1562 | if (r) |
1563 | return r; |
1564 | } |
1565 | r300_set_reg_safe(rdev); |
1566 | |
1567 | /* Initialize power management */ |
1568 | radeon_pm_init(rdev); |
1569 | |
1570 | rdev->accel_working = true; |
1571 | r = r300_startup(rdev); |
1572 | if (r) { |
1573 | /* Something went wrong with the accel init, so stop accel */ |
1574 | dev_err(rdev->dev, "Disabling GPU acceleration\n" ); |
1575 | r100_cp_fini(rdev); |
1576 | radeon_wb_fini(rdev); |
1577 | radeon_ib_pool_fini(rdev); |
1578 | radeon_irq_kms_fini(rdev); |
1579 | if (rdev->flags & RADEON_IS_PCIE) |
1580 | rv370_pcie_gart_fini(rdev); |
1581 | if (rdev->flags & RADEON_IS_PCI) |
1582 | r100_pci_gart_fini(rdev); |
1583 | radeon_agp_fini(rdev); |
1584 | rdev->accel_working = false; |
1585 | } |
1586 | return 0; |
1587 | } |
1588 | |