1 | /* $NetBSD: pciide_common.c,v 1.62 2016/10/13 17:11:09 jdolecek Exp $ */ |
2 | |
3 | |
4 | /* |
5 | * Copyright (c) 1999, 2000, 2001, 2003 Manuel Bouyer. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * |
27 | */ |
28 | |
29 | |
30 | /* |
31 | * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. |
32 | * |
33 | * Redistribution and use in source and binary forms, with or without |
34 | * modification, are permitted provided that the following conditions |
35 | * are met: |
36 | * 1. Redistributions of source code must retain the above copyright |
37 | * notice, this list of conditions and the following disclaimer. |
38 | * 2. Redistributions in binary form must reproduce the above copyright |
39 | * notice, this list of conditions and the following disclaimer in the |
40 | * documentation and/or other materials provided with the distribution. |
41 | * 3. All advertising materials mentioning features or use of this software |
42 | * must display the following acknowledgement: |
43 | * This product includes software developed by Christopher G. Demetriou |
44 | * for the NetBSD Project. |
45 | * 4. The name of the author may not be used to endorse or promote products |
46 | * derived from this software without specific prior written permission |
47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
49 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
50 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
51 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
52 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
53 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
54 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
55 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
56 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
57 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
58 | */ |
59 | |
60 | /* |
61 | * PCI IDE controller driver. |
62 | * |
63 | * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD |
64 | * sys/dev/pci/ppb.c, revision 1.16). |
65 | * |
66 | * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and |
67 | * "Programming Interface for Bus Master IDE Controller, Revision 1.0 |
68 | * 5/16/94" from the PCI SIG. |
69 | * |
70 | */ |
71 | |
72 | #include <sys/cdefs.h> |
73 | __KERNEL_RCSID(0, "$NetBSD: pciide_common.c,v 1.62 2016/10/13 17:11:09 jdolecek Exp $" ); |
74 | |
75 | #include <sys/param.h> |
76 | #include <sys/malloc.h> |
77 | |
78 | #include <dev/pci/pcireg.h> |
79 | #include <dev/pci/pcivar.h> |
80 | #include <dev/pci/pcidevs.h> |
81 | #include <dev/pci/pciidereg.h> |
82 | #include <dev/pci/pciidevar.h> |
83 | |
84 | #include <dev/ic/wdcreg.h> |
85 | |
#ifdef ATADEBUG
/* Debug mask gating ATADEBUG_PRINT() output in this file (DEBUG_* bits). */
int atadebug_pciide_mask = 0;
#endif

#if NATA_DMA
/* Shared error format for DMA map setup/teardown failures:
 * device name, channel, verb ("allocate"/"map"/...), drive, errno. */
static const char dmaerrfmt[] =
    "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
#endif

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};
102 | |
103 | const struct pciide_product_desc * |
104 | pciide_lookup_product(pcireg_t id, const struct pciide_product_desc *pp) |
105 | { |
106 | for (; pp->chip_map != NULL; pp++) |
107 | if (PCI_PRODUCT(id) == pp->ide_product) |
108 | break; |
109 | |
110 | if (pp->chip_map == NULL) |
111 | return NULL; |
112 | return pp; |
113 | } |
114 | |
/*
 * Common attach glue shared by all pciide front-ends: record softc
 * state, print the device banner, set DMA defaults, run the
 * chip-specific map function and finally enable PCI bus mastering
 * when DMA turned out to be usable.
 */
void
pciide_common_attach(struct pciide_softc *sc, const struct pci_attach_args *pa,
    const struct pciide_product_desc *pp)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
#if NATA_DMA
	pcireg_t csr;
#endif
	const char *displaydev = NULL;
	int dontprint = 0;

	sc->sc_pci_id = pa->pa_id;
	if (pp == NULL) {
		/* should only happen for generic pciide devices */
		sc->sc_pp = &default_product_desc;
	} else {
		sc->sc_pp = pp;
		/* if ide_name == NULL, printf is done in chip-specific map */
		if (pp->ide_name)
			displaydev = pp->ide_name;
		else
			dontprint = 1;
	}

	if (dontprint) {
		/* chip_map will print the real device name later */
		aprint_naive("disk controller\n");
		aprint_normal("\n"); /* ??? */
	} else
		pci_aprint_devinfo_fancy(pa, "disk controller", displaydev, 1);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

#if NATA_DMA
	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
#endif

#ifdef ATADEBUG
	if (atadebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	/* Hand off to the chip-specific setup; may set sc_dma_ok. */
	sc->sc_pp->chip_map(sc, pa);

#if NATA_DMA
	if (sc->sc_dma_ok) {
		/* enable PCI bus mastering only once DMA is known-good */
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
#endif
	ATADEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}
171 | |
/*
 * Common detach path: detach the wdc layer first, then per channel
 * unmap the command/control register windows, tear down per-drive DMA
 * tables and free the command queue; finally release the bus-master
 * DMA (and optional BA5) register windows.  Returns 0 on success or
 * the error from wdcdetach().
 */
int
pciide_common_detach(struct pciide_softc *sc, int flags)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	int channel, drive;
	int rv;

	rv = wdcdetach(sc->sc_wdcdev.sc_atac.atac_dev, flags);
	if (rv)
		return rv;

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);

		/* channels disabled at attach never mapped anything */
		if (wdc_cp->ch_flags & ATACH_DISABLED)
			continue;

		if (wdr->cmd_ios != 0)
			bus_space_unmap(wdr->cmd_iot,
			    wdr->cmd_baseioh, wdr->cmd_ios);
		if (cp->compat != 0) {
			/* compat mode mapped ctl_ioh directly */
			if (wdr->ctl_ios != 0)
				bus_space_unmap(wdr->ctl_iot,
				    wdr->ctl_ioh, wdr->ctl_ios);
		} else {
			/*
			 * native mode mapped a wider window
			 * (ctl_baseioh/ctl_ios); wdr->ctl_ioh is only a
			 * subregion of it.
			 */
			if (cp->ctl_ios != 0)
				bus_space_unmap(wdr->ctl_iot,
				    cp->ctl_baseioh, cp->ctl_ios);
		}

		for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) {
#if NATA_DMA
			pciide_dma_table_teardown(sc, channel, drive);
#endif
		}

		free(cp->ata_channel.ch_queue, M_DEVBUF);
		cp->ata_channel.atabus = NULL;
	}

#if NATA_DMA
	if (sc->sc_dma_ios != 0)
		bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_ios);
	if (sc->sc_ba5_ss != 0)
		bus_space_unmap(sc->sc_ba5_st, sc->sc_ba5_sh, sc->sc_ba5_ss);
#endif

	return 0;
}
226 | |
227 | int |
228 | pciide_detach(device_t self, int flags) |
229 | { |
230 | struct pciide_softc *sc = device_private(self); |
231 | struct pciide_channel *cp; |
232 | int channel; |
233 | #ifndef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH |
234 | bool has_compat_chan; |
235 | |
236 | has_compat_chan = false; |
237 | for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; |
238 | channel++) { |
239 | cp = &sc->pciide_channels[channel]; |
240 | if (cp->compat != 0) { |
241 | has_compat_chan = true; |
242 | } |
243 | } |
244 | |
245 | if (has_compat_chan != false) |
246 | return EBUSY; |
247 | #endif |
248 | |
249 | for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; |
250 | channel++) { |
251 | cp = &sc->pciide_channels[channel]; |
252 | if (cp->compat != 0) |
253 | if (cp->ih != NULL) { |
254 | pciide_unmap_compat_intr(sc->sc_pc, cp, channel); |
255 | cp->ih = NULL; |
256 | } |
257 | } |
258 | |
259 | if (sc->sc_pci_ih != NULL) { |
260 | pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); |
261 | sc->sc_pci_ih = NULL; |
262 | } |
263 | |
264 | return pciide_common_detach(sc, flags); |
265 | } |
266 | |
267 | /* tell whether the chip is enabled or not */ |
268 | int |
269 | pciide_chipen(struct pciide_softc *sc, const struct pci_attach_args *pa) |
270 | { |
271 | pcireg_t csr; |
272 | |
273 | if ((pa->pa_flags & PCI_FLAGS_IO_OKAY) == 0) { |
274 | aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
275 | "I/O access disabled at bridge\n" ); |
276 | return 0; |
277 | } |
278 | csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); |
279 | if ((csr & PCI_COMMAND_IO_ENABLE) == 0) { |
280 | aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
281 | "I/O access disabled at device\n" ); |
282 | return 0; |
283 | } |
284 | return 1; |
285 | } |
286 | |
/*
 * Map the fixed legacy (compatibility-mode) command and control
 * register blocks for `compatchan' and carve out the per-register
 * subregions.  On failure the channel is marked ATACH_DISABLED.
 */
void
pciide_mapregs_compat(const struct pci_attach_args *pa,
    struct pciide_channel *cp, int compatchan)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
	int i;

	cp->compat = 1;

	wdr->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdr->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdr->cmd_baseioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel cmd regs\n", cp->name);
		goto bad;
	}
	wdr->cmd_ios = PCIIDE_COMPAT_CMD_SIZE;

	wdr->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdr->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdr->ctl_ioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel ctl regs\n", cp->name);
		/* undo the cmd regs mapping before bailing out */
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios);
		goto bad;
	}
	wdr->ctl_ios = PCIIDE_COMPAT_CTL_SIZE;

	for (i = 0; i < WDC_NREG; i++) {
		/* register 0 (the data port) is 4 bytes wide for 32-bit I/O */
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't subregion %s channel cmd regs\n",
			    cp->name);
			goto bad;
		}
	}
	wdc_init_shadow_regs(wdc_cp);
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];
	return;

bad:
	cp->ata_channel.ch_flags |= ATACH_DISABLED;
	return;
}
335 | |
336 | void |
337 | pciide_mapregs_native(const struct pci_attach_args *pa, |
338 | struct pciide_channel *cp, int (*pci_intr)(void *)) |
339 | { |
340 | struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); |
341 | struct ata_channel *wdc_cp = &cp->ata_channel; |
342 | struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp); |
343 | const char *intrstr; |
344 | pci_intr_handle_t intrhandle; |
345 | int i; |
346 | char intrbuf[PCI_INTRSTR_LEN]; |
347 | |
348 | cp->compat = 0; |
349 | |
350 | if (sc->sc_pci_ih == NULL) { |
351 | if (pci_intr_map(pa, &intrhandle) != 0) { |
352 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
353 | "couldn't map native-PCI interrupt\n" ); |
354 | goto bad; |
355 | } |
356 | intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf, sizeof(intrbuf)); |
357 | sc->sc_pci_ih = pci_intr_establish_xname(pa->pa_pc, |
358 | intrhandle, IPL_BIO, pci_intr, sc, |
359 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev)); |
360 | if (sc->sc_pci_ih != NULL) { |
361 | aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
362 | "using %s for native-PCI interrupt\n" , |
363 | intrstr ? intrstr : "unknown interrupt" ); |
364 | } else { |
365 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
366 | "couldn't establish native-PCI interrupt" ); |
367 | if (intrstr != NULL) |
368 | aprint_error(" at %s" , intrstr); |
369 | aprint_error("\n" ); |
370 | goto bad; |
371 | } |
372 | } |
373 | cp->ih = sc->sc_pci_ih; |
374 | if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->ch_channel), |
375 | PCI_MAPREG_TYPE_IO, 0, |
376 | &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, &wdr->cmd_ios) != 0) { |
377 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
378 | "couldn't map %s channel cmd regs\n" , cp->name); |
379 | goto bad; |
380 | } |
381 | |
382 | if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->ch_channel), |
383 | PCI_MAPREG_TYPE_IO, 0, |
384 | &wdr->ctl_iot, &cp->ctl_baseioh, NULL, &cp->ctl_ios) != 0) { |
385 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
386 | "couldn't map %s channel ctl regs\n" , cp->name); |
387 | bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); |
388 | goto bad; |
389 | } |
390 | /* |
391 | * In native mode, 4 bytes of I/O space are mapped for the control |
392 | * register, the control register is at offset 2. Pass the generic |
393 | * code a handle for only one byte at the right offset. |
394 | */ |
395 | if (bus_space_subregion(wdr->ctl_iot, cp->ctl_baseioh, 2, 1, |
396 | &wdr->ctl_ioh) != 0) { |
397 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
398 | "unable to subregion %s channel ctl regs\n" , cp->name); |
399 | bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); |
400 | bus_space_unmap(wdr->cmd_iot, cp->ctl_baseioh, cp->ctl_ios); |
401 | goto bad; |
402 | } |
403 | |
404 | for (i = 0; i < WDC_NREG; i++) { |
405 | if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, |
406 | i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) { |
407 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
408 | "couldn't subregion %s channel cmd regs\n" , |
409 | cp->name); |
410 | goto bad; |
411 | } |
412 | } |
413 | wdc_init_shadow_regs(wdc_cp); |
414 | wdr->data32iot = wdr->cmd_iot; |
415 | wdr->data32ioh = wdr->cmd_iohs[0]; |
416 | return; |
417 | |
418 | bad: |
419 | cp->ata_channel.ch_flags |= ATACH_DISABLED; |
420 | return; |
421 | } |
422 | |
#if NATA_DMA
/*
 * Map the bus-master DMA registers (PCIIDE_REG_BUS_MASTER_DMA BAR)
 * and carve per-channel register handles out of the window.  Sets
 * sc->sc_dma_ok and installs the dma_init/start/finish methods in the
 * wdc softc; every aprint_verbose() here continues the attach line.
 */
void
pciide_mapreg_dma(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;
	struct pciide_channel *pc;
	int reg, chan;
	bus_size_t size;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(
			    ", but unused (couldn't query registers)");
			break;
		}
		/* some chips can only address 16 bits of I/O space */
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			aprint_verbose(
			    ", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		/* shared mapping path for both I/O and 32-bit memory BARs */
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_ios)
		    == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		/* config-file override: flags 0x... disables DMA */
		if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			aprint_verbose(
			    ", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		aprint_verbose(
		    ", but unsupported register maptype (0x%x)", maptype);
	}

	if (sc->sc_dma_ok == 0)
		return;

	/*
	 * Set up the default handles for the DMA registers.
	 * Just reserve 32 bits for each handle, unless space
	 * doesn't permit it.
	 */
	for (chan = 0; chan < PCIIDE_NUM_CHANNELS; chan++) {
		pc = &sc->pciide_channels[chan];
		for (reg = 0; reg < IDEDMA_NREGS; reg++) {
			size = 4;
			if (size > (IDEDMA_SCH_OFFSET - reg))
				size = IDEDMA_SCH_OFFSET - reg;
			if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_SCH_OFFSET * chan + reg, size,
			    &pc->dma_iohs[reg]) != 0) {
				sc->sc_dma_ok = 0;
				aprint_verbose(", but can't subregion offset %d "
				    "size %lu", reg, (u_long)size);
				return;
			}
		}
	}
}
#endif	/* NATA_DMA */
526 | |
527 | int |
528 | pciide_compat_intr(void *arg) |
529 | { |
530 | struct pciide_channel *cp = arg; |
531 | |
532 | #ifdef DIAGNOSTIC |
533 | /* should only be called for a compat channel */ |
534 | if (cp->compat == 0) |
535 | panic("pciide compat intr called for non-compat chan %p" , cp); |
536 | #endif |
537 | return (wdcintr(&cp->ata_channel)); |
538 | } |
539 | |
540 | int |
541 | pciide_pci_intr(void *arg) |
542 | { |
543 | struct pciide_softc *sc = arg; |
544 | struct pciide_channel *cp; |
545 | struct ata_channel *wdc_cp; |
546 | int i, rv, crv; |
547 | |
548 | rv = 0; |
549 | for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) { |
550 | cp = &sc->pciide_channels[i]; |
551 | wdc_cp = &cp->ata_channel; |
552 | |
553 | /* If a compat channel skip. */ |
554 | if (cp->compat) |
555 | continue; |
556 | /* if this channel not waiting for intr, skip */ |
557 | if ((wdc_cp->ch_flags & ATACH_IRQ_WAIT) == 0) |
558 | continue; |
559 | |
560 | crv = wdcintr(wdc_cp); |
561 | if (crv == 0) |
562 | ; /* leave rv alone */ |
563 | else if (crv == 1) |
564 | rv = 1; /* claim the intr */ |
565 | else if (rv == 0) /* crv should be -1 in this case */ |
566 | rv = crv; /* if we've done no better, take it */ |
567 | } |
568 | return (rv); |
569 | } |
570 | |
571 | #if NATA_DMA |
572 | void |
573 | pciide_channel_dma_setup(struct pciide_channel *cp) |
574 | { |
575 | int drive, s; |
576 | struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); |
577 | struct ata_drive_datas *drvp; |
578 | |
579 | KASSERT(cp->ata_channel.ch_ndrives != 0); |
580 | |
581 | for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) { |
582 | drvp = &cp->ata_channel.ch_drive[drive]; |
583 | /* If no drive, skip */ |
584 | if (drvp->drive_type == ATA_DRIVET_NONE) |
585 | continue; |
586 | /* setup DMA if needed */ |
587 | if (((drvp->drive_flags & ATA_DRIVE_DMA) == 0 && |
588 | (drvp->drive_flags & ATA_DRIVE_UDMA) == 0) || |
589 | sc->sc_dma_ok == 0) { |
590 | s = splbio(); |
591 | drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA); |
592 | splx(s); |
593 | continue; |
594 | } |
595 | if (pciide_dma_table_setup(sc, cp->ata_channel.ch_channel, |
596 | drive) != 0) { |
597 | /* Abort DMA setup */ |
598 | s = splbio(); |
599 | drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA); |
600 | splx(s); |
601 | continue; |
602 | } |
603 | } |
604 | } |
605 | |
606 | #define NIDEDMA_TABLES(sc) \ |
607 | (MAXPHYS/(min((sc)->sc_dma_maxsegsz, PAGE_SIZE)) + 1) |
608 | |
609 | int |
610 | pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive) |
611 | { |
612 | int error; |
613 | const bus_size_t dma_table_size = |
614 | sizeof(struct idedma_table) * NIDEDMA_TABLES(sc); |
615 | struct pciide_dma_maps *dma_maps = |
616 | &sc->pciide_channels[channel].dma_maps[drive]; |
617 | |
618 | /* If table was already allocated, just return */ |
619 | if (dma_maps->dma_table) |
620 | return 0; |
621 | |
622 | /* Allocate memory for the DMA tables and map it */ |
623 | if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, |
624 | IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &dma_maps->dmamap_table_seg, |
625 | 1, &dma_maps->dmamap_table_nseg, BUS_DMA_NOWAIT)) != 0) { |
626 | aprint_error(dmaerrfmt, |
627 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, |
628 | "allocate" , drive, error); |
629 | return error; |
630 | } |
631 | if ((error = bus_dmamem_map(sc->sc_dmat, &dma_maps->dmamap_table_seg, |
632 | dma_maps->dmamap_table_nseg, dma_table_size, |
633 | (void **)&dma_maps->dma_table, |
634 | BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { |
635 | aprint_error(dmaerrfmt, |
636 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, |
637 | "map" , drive, error); |
638 | return error; |
639 | } |
640 | ATADEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " |
641 | "phy 0x%lx\n" , dma_maps->dma_table, (u_long)dma_table_size, |
642 | (unsigned long)dma_maps->dmamap_table_seg.ds_addr), DEBUG_PROBE); |
643 | /* Create and load table DMA map for this disk */ |
644 | if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, |
645 | 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, |
646 | &dma_maps->dmamap_table)) != 0) { |
647 | aprint_error(dmaerrfmt, |
648 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, |
649 | "create" , drive, error); |
650 | return error; |
651 | } |
652 | if ((error = bus_dmamap_load(sc->sc_dmat, |
653 | dma_maps->dmamap_table, |
654 | dma_maps->dma_table, |
655 | dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { |
656 | aprint_error(dmaerrfmt, |
657 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, |
658 | "load" , drive, error); |
659 | return error; |
660 | } |
661 | ATADEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n" , |
662 | (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), |
663 | DEBUG_PROBE); |
664 | /* Create a xfer DMA map for this drive */ |
665 | if ((error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, |
666 | NIDEDMA_TABLES(sc), sc->sc_dma_maxsegsz, sc->sc_dma_boundary, |
667 | BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, |
668 | &dma_maps->dmamap_xfer)) != 0) { |
669 | aprint_error(dmaerrfmt, |
670 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, |
671 | "create xfer" , drive, error); |
672 | return error; |
673 | } |
674 | return 0; |
675 | } |
676 | |
677 | void |
678 | pciide_dma_table_teardown(struct pciide_softc *sc, int channel, int drive) |
679 | { |
680 | struct pciide_channel *cp; |
681 | struct pciide_dma_maps *dma_maps; |
682 | |
683 | cp = &sc->pciide_channels[channel]; |
684 | dma_maps = &cp->dma_maps[drive]; |
685 | |
686 | if (dma_maps->dma_table == NULL) |
687 | return; |
688 | |
689 | bus_dmamap_destroy(sc->sc_dmat, dma_maps->dmamap_xfer); |
690 | bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_table); |
691 | bus_dmamap_destroy(sc->sc_dmat, dma_maps->dmamap_table); |
692 | bus_dmamem_unmap(sc->sc_dmat, dma_maps->dma_table, |
693 | sizeof(struct idedma_table) * NIDEDMA_TABLES(sc)); |
694 | bus_dmamem_free(sc->sc_dmat, &dma_maps->dmamap_table_seg, |
695 | dma_maps->dmamap_table_nseg); |
696 | |
697 | dma_maps->dma_table = NULL; |
698 | |
699 | return; |
700 | } |
701 | |
/*
 * Load `databuf' into the xfer DMA map and translate the resulting
 * segment list into the bus-master descriptor table (base address +
 * byte count per entry, EOT flag on the last).  `flags' carries the
 * WDC_DMA_* direction bit, remembered for the matching sync at
 * dma_finish time.  Returns 0 or a bus_dma error code.
 */
int
pciide_dma_dmamap_setup(struct pciide_softc *sc, int channel, int drive,
    void *databuf, size_t datalen, int flags)
{
	int error, seg;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "load xfer", drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		/* descriptor table entries are little-endian on the wire */
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		ATADEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	/* mark the last entry so the controller stops there */
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	/* flush the table before the controller reads it */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_dmamap_setup: addr 0x%lx "
		    "not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif
	/* remember flags */
	dma_maps->dma_flags = flags;

	return 0;
}
771 | |
/*
 * wdc dma_init hook: load the data buffer, fill the descriptor table
 * and program the bus-master registers (clear status, table address,
 * transfer direction).  The transfer itself is kicked off later by
 * pciide_dma_start().  Returns 0 or a bus_dma error code.
 */
int
pciide_dma_init(void *v, int channel, int drive, void *databuf, size_t datalen,
    int flags)
{
	struct pciide_softc *sc = v;
	int error;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	if ((error = pciide_dma_dmamap_setup(sc, channel, drive,
	    databuf, datalen, flags)) != 0)
		return error;
	/* Maps are ready. Start DMA function */
	/* Clear status bits (write back what was read) */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_TBL], 0,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd);
	return 0;
}
796 | |
797 | void |
798 | pciide_dma_start(void *v, int channel, int drive) |
799 | { |
800 | struct pciide_softc *sc = v; |
801 | struct pciide_channel *cp = &sc->pciide_channels[channel]; |
802 | |
803 | ATADEBUG_PRINT(("pciide_dma_start\n" ),DEBUG_XFERS); |
804 | bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0, |
805 | bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0) |
806 | | IDEDMA_CMD_START); |
807 | } |
808 | |
/*
 * wdc dma_finish hook: stop the engine and report the outcome as a
 * WDC_DMAST_* bitmask (0 == clean completion).  With force ==
 * WDC_DMAEND_END, a transfer whose interrupt bit is not yet set is
 * left running (WDC_DMAST_NOIRQ); the ABRT variants stop it
 * unconditionally, and ABRT_QUIET additionally suppresses the error
 * console messages.
 */
int
pciide_dma_finish(void *v, int channel, int drive, int force)
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0);
	ATADEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == WDC_DMAEND_END && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
	    & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0 && force != WDC_DMAEND_ABRT_QUIET) {
		/* only reachable on the abort paths (see early return) */
		aprint_error("%s:%d:%d: bus-master DMA error: missing "
		    "interrupt, status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
		    channel, drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}
858 | |
859 | void |
860 | pciide_irqack(struct ata_channel *chp) |
861 | { |
862 | struct pciide_channel *cp = CHAN_TO_PCHAN(chp); |
863 | struct pciide_softc *sc = CHAN_TO_PCIIDE(chp); |
864 | |
865 | /* clear status bits in IDE DMA registers */ |
866 | bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0, |
867 | bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0)); |
868 | } |
869 | #endif /* NATA_DMA */ |
870 | |
871 | /* some common code used by several chip_map */ |
872 | int |
873 | pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) |
874 | { |
875 | struct pciide_channel *cp = &sc->pciide_channels[channel]; |
876 | sc->wdc_chanarray[channel] = &cp->ata_channel; |
877 | cp->name = PCIIDE_CHANNEL_NAME(channel); |
878 | cp->ata_channel.ch_channel = channel; |
879 | cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac; |
880 | cp->ata_channel.ch_queue = |
881 | malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT|M_ZERO); |
882 | if (cp->ata_channel.ch_queue == NULL) { |
883 | aprint_error("%s %s channel: " |
884 | "can't allocate memory for command queue" , |
885 | device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name); |
886 | return 0; |
887 | } |
888 | aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
889 | "%s channel %s to %s mode\n" , cp->name, |
890 | (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? |
891 | "configured" : "wired" , |
892 | (interface & PCIIDE_INTERFACE_PCI(channel)) ? |
893 | "native-PCI" : "compatibility" ); |
894 | return 1; |
895 | } |
896 | |
897 | /* some common code used by several chip channel_map */ |
898 | void |
899 | pciide_mapchan(const struct pci_attach_args *pa, struct pciide_channel *cp, |
900 | pcireg_t interface, int (*pci_intr)(void *)) |
901 | { |
902 | struct ata_channel *wdc_cp = &cp->ata_channel; |
903 | |
904 | if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) |
905 | pciide_mapregs_native(pa, cp, pci_intr); |
906 | else { |
907 | pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel); |
908 | if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0) |
909 | pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel); |
910 | } |
911 | wdcattach(wdc_cp); |
912 | } |
913 | |
914 | /* |
915 | * generic code to map the compat intr. |
916 | */ |
917 | void |
918 | pciide_map_compat_intr(const struct pci_attach_args *pa, |
919 | struct pciide_channel *cp, int compatchan) |
920 | { |
921 | struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); |
922 | |
923 | #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH |
924 | cp->ih = |
925 | pciide_machdep_compat_intr_establish(sc->sc_wdcdev.sc_atac.atac_dev, |
926 | pa, compatchan, pciide_compat_intr, cp); |
927 | if (cp->ih == NULL) { |
928 | #endif |
929 | aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, |
930 | "no compatibility interrupt for use by %s " |
931 | "channel\n" , cp->name); |
932 | cp->ata_channel.ch_flags |= ATACH_DISABLED; |
933 | #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH |
934 | } |
935 | #endif |
936 | } |
937 | |
/*
 * Tear down the compatibility-mode interrupt established by
 * pciide_map_compat_intr().  A no-op on ports without a
 * machine-dependent disestablish hook.
 *
 * NOTE(review): the 'pc' parameter is unused here; the softc's sc_pc is
 * passed to the machdep hook instead — presumably they are always the
 * same chipset tag, but confirm before removing the parameter.
 */
void
pciide_unmap_compat_intr(pci_chipset_tag_t pc, struct pciide_channel *cp,
    int compatchan)
{
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);

	pciide_machdep_compat_intr_disestablish(sc->sc_wdcdev.sc_atac.atac_dev,
	    sc->sc_pc, compatchan, cp->ih);
#endif
}
949 | |
/*
 * Fallback chip_map for controllers with no chip-specific support:
 * advertise only generic capabilities (transfer modes are left as the
 * firmware programmed them), map each channel, probe compatibility
 * channels for actual presence, and — if enabled — allocate bus-master
 * DMA resources.
 */
void
default_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel;
#if NATA_DMA
	int drive;
	u_int8_t idedma_ctl;
#endif
	const char *failreason;
	struct wdc_regs *wdr;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
#if NATA_DMA
		aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "bus-master DMA support present" );
		/*
		 * With the generic product description, DMA is only used
		 * when explicitly requested via the PCIIDE_OPTIONS_DMA
		 * config flag.
		 */
		if (sc->sc_pp == &default_product_desc &&
		    (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			aprint_verbose(", but unused (no driver support)" );
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				aprint_verbose(", used without full driver "
				    "support" );
		}
#else
		aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "bus-master DMA support present, but unused (no driver "
		    "support)" );
#endif /* NATA_DMA */
	} else {
		aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "hardware does not support DMA" );
#if NATA_DMA
		sc->sc_dma_ok = 0;
#endif
	}
	aprint_verbose("\n" );
#if NATA_DMA
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
#endif
	/* 0 = do not reprogram modes; keep whatever the firmware set up */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
#if NATA_DMA
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
#endif

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		wdr = CHAN_TO_WDC_REGS(&cp->ata_channel);
		if (interface & PCIIDE_INTERFACE_PCI(channel))
			pciide_mapregs_native(pa, cp, pciide_pci_intr);
		else
			pciide_mapregs_compat(pa, cp,
			    cp->ata_channel.ch_channel);
		if (cp->ata_channel.ch_flags & ATACH_DISABLED)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		/*
		 * In native mode, always enable the controller. It's
		 * not possible to have an ISA board using the same address
		 * anyway.
		 */
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			wdcattach(&cp->ata_channel);
			continue;
		}
		if (!wdcprobe(&cp->ata_channel)) {
			failreason = "not responding; disabled or no drives?" ;
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled. (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.) YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->ata_channel))
			failreason = "other hardware responding at addresses" ;
		/* re-enable I/O space before attaching or bailing out */
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (%s)\n" , cp->name, failreason);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    wdr->cmd_ios);
			bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh,
			    wdr->ctl_ios);
		} else {
			pciide_map_compat_intr(pa, cp,
			    cp->ata_channel.ch_channel);
			wdcattach(&cp->ata_channel);
		}
	}

#if NATA_DMA
	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) {
			/*
			 * we have not probed the drives yet, allocate
			 * resources for all of them.
			 */
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup: fall back to PIO-only */
				aprint_error(
				    "%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n" ,
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev),
				    channel, drive);
				sc->sc_dma_ok = 0;
				sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
				sc->sc_wdcdev.irqack = NULL;
				break;
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot,
			    cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
		}
	}
#endif /* NATA_DMA */
}
1111 | |
/*
 * Generic per-channel setup for SATA controllers.  Transfer-mode
 * programming is meaningless on S-ATA, so this only prepares the DMA
 * state and sets the bus-master software status bits for drives that
 * will use (U)DMA.
 */
void
sata_setup_channel(struct ata_channel *chp)
{
#if NATA_DMA
	struct ata_drive_datas *drvp;
	int drive;
#if NATA_UDMA
	int s;
#endif
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	KASSERT(cp->ata_channel.ch_ndrives != 0);
	for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA; UDMA and plain DMA are exclusive */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else
#endif /* NATA_UDMA */
		if (drvp->drive_flags & ATA_DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
	}

	/*
	 * Nothing to do to setup modes; it is meaningless in S-ATA
	 * (but many S-ATA drives still want to get the SET_FEATURE
	 * command).
	 */
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
#endif /* NATA_DMA */
}
1162 | |