1 | /* $NetBSD: aic6915.c,v 1.33 2016/07/07 06:55:41 msaitoh Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2001 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /* |
33 | * Device driver for the Adaptec AIC-6915 (``Starfire'') |
34 | * 10/100 Ethernet controller. |
35 | */ |
36 | |
37 | #include <sys/cdefs.h> |
38 | __KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.33 2016/07/07 06:55:41 msaitoh Exp $" ); |
39 | |
40 | |
41 | #include <sys/param.h> |
42 | #include <sys/systm.h> |
43 | #include <sys/callout.h> |
44 | #include <sys/mbuf.h> |
45 | #include <sys/malloc.h> |
46 | #include <sys/kernel.h> |
47 | #include <sys/socket.h> |
48 | #include <sys/ioctl.h> |
49 | #include <sys/errno.h> |
50 | #include <sys/device.h> |
51 | |
52 | #include <net/if.h> |
53 | #include <net/if_dl.h> |
54 | #include <net/if_media.h> |
55 | #include <net/if_ether.h> |
56 | |
57 | #include <net/bpf.h> |
58 | |
59 | #include <sys/bus.h> |
60 | #include <sys/intr.h> |
61 | |
62 | #include <dev/mii/miivar.h> |
63 | |
64 | #include <dev/ic/aic6915reg.h> |
65 | #include <dev/ic/aic6915var.h> |
66 | |
67 | static void sf_start(struct ifnet *); |
68 | static void sf_watchdog(struct ifnet *); |
69 | static int sf_ioctl(struct ifnet *, u_long, void *); |
70 | static int sf_init(struct ifnet *); |
71 | static void sf_stop(struct ifnet *, int); |
72 | |
73 | static bool sf_shutdown(device_t, int); |
74 | |
75 | static void sf_txintr(struct sf_softc *); |
76 | static void sf_rxintr(struct sf_softc *); |
77 | static void sf_stats_update(struct sf_softc *); |
78 | |
79 | static void sf_reset(struct sf_softc *); |
80 | static void sf_macreset(struct sf_softc *); |
81 | static void sf_rxdrain(struct sf_softc *); |
82 | static int sf_add_rxbuf(struct sf_softc *, int); |
83 | static uint8_t sf_read_eeprom(struct sf_softc *, int); |
84 | static void sf_set_filter(struct sf_softc *); |
85 | |
86 | static int sf_mii_read(device_t, int, int); |
87 | static void sf_mii_write(device_t, int, int, int); |
88 | static void sf_mii_statchg(struct ifnet *); |
89 | |
90 | static void sf_tick(void *); |
91 | |
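/*
 * The functional registers are always directly accessible through
 * sc_sh_func; sf_attach() arranges this for both the I/O-mapped and
 * memory-mapped cases.
 */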
92 | #define sf_funcreg_read(sc, reg) \ |
93 | bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg)) |
94 | #define sf_funcreg_write(sc, reg, val) \ |
95 | bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val)) |
96 | |
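/*
 * sf_reg_read:
 *
 *	Read an arbitrary chip register.  When the chip is I/O mapped,
 *	registers outside the I/O window are reached indirectly: the
 *	register offset is written to SF_IndirectIoAccess and the data
 *	is transferred through SF_IndirectIoDataPort.
 */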
97 | static inline uint32_t |
98 | sf_reg_read(struct sf_softc *sc, bus_addr_t reg) |
99 | { |
100 | |
101 | if (__predict_false(sc->sc_iomapped)) { |
102 | bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess, |
103 | reg); |
104 | return (bus_space_read_4(sc->sc_st, sc->sc_sh, |
105 | SF_IndirectIoDataPort)); |
106 | } |
107 | |
108 | return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg)); |
109 | } |
110 | |
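/*
 * sf_reg_write:
 *
 *	Write an arbitrary chip register, using the same indirect
 *	access mechanism as sf_reg_read() when the chip is I/O mapped.
 */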
111 | static inline void |
112 | sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val) |
113 | { |
114 | |
115 | if (__predict_false(sc->sc_iomapped)) { |
116 | bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess, |
117 | reg); |
118 | bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort, |
119 | val); |
120 | return; |
121 | } |
122 | |
123 | bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val); |
124 | } |
125 | |
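/*
 * Accessors for the general register space (MAC, PHY, statistics, and
 * filter tables), which sits SF_GENREG_OFFSET bytes into the chip's
 * register space.
 */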
126 | #define sf_genreg_read(sc, reg) \ |
127 | sf_reg_read((sc), (reg) + SF_GENREG_OFFSET) |
128 | #define sf_genreg_write(sc, reg, val) \ |
129 | sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val)) |
130 | |
131 | /* |
132 | * sf_attach: |
133 | * |
134 | * Attach a Starfire interface to the system. |
135 | */ |
136 | void |
137 | sf_attach(struct sf_softc *sc) |
138 | { |
139 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
140 | int i, rseg, error; |
141 | bus_dma_segment_t seg; |
	uint8_t enaddr[ETHER_ADDR_LEN];
143 | |
144 | callout_init(&sc->sc_tick_callout, 0); |
145 | |
146 | /* |
147 | * If we're I/O mapped, the functional register handle is |
148 | * the same as the base handle. If we're memory mapped, |
149 | * carve off a chunk of the register space for the functional |
150 | * registers, to save on arithmetic later. |
151 | */ |
152 | if (sc->sc_iomapped) |
153 | sc->sc_sh_func = sc->sc_sh; |
154 | else { |
155 | if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh, |
156 | SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) { |
157 | aprint_error_dev(sc->sc_dev, "unable to sub-region " |
158 | "functional registers, error = %d\n" , error); |
159 | return; |
160 | } |
161 | } |
162 | |
163 | /* |
164 | * Initialize the transmit threshold for this interface. The |
165 | * manual describes the default as 4 * 16 bytes. We start out |
166 | * at 10 * 16 bytes, to avoid a bunch of initial underruns on |
167 | * several platforms. |
168 | */ |
169 | sc->sc_txthresh = 10; |
170 | |
171 | /* |
172 | * Allocate the control data structures, and create and load the |
173 | * DMA map for it. |
174 | */ |
175 | if ((error = bus_dmamem_alloc(sc->sc_dmat, |
176 | sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, |
177 | BUS_DMA_NOWAIT)) != 0) { |
178 | aprint_error_dev(sc->sc_dev, |
179 | "unable to allocate control data, error = %d\n" , error); |
180 | goto fail_0; |
181 | } |
182 | |
183 | if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, |
184 | sizeof(struct sf_control_data), (void **)&sc->sc_control_data, |
185 | BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { |
186 | aprint_error_dev(sc->sc_dev, |
187 | "unable to map control data, error = %d\n" , error); |
188 | goto fail_1; |
189 | } |
190 | |
191 | if ((error = bus_dmamap_create(sc->sc_dmat, |
192 | sizeof(struct sf_control_data), 1, |
193 | sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT, |
194 | &sc->sc_cddmamap)) != 0) { |
195 | aprint_error_dev(sc->sc_dev, "unable to create control data " |
196 | "DMA map, error = %d\n" , error); |
197 | goto fail_2; |
198 | } |
199 | |
200 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, |
201 | sc->sc_control_data, sizeof(struct sf_control_data), NULL, |
202 | BUS_DMA_NOWAIT)) != 0) { |
203 | aprint_error_dev(sc->sc_dev, "unable to load control data " |
204 | "DMA map, error = %d\n" , error); |
205 | goto fail_3; |
206 | } |
207 | |
208 | /* |
209 | * Create the transmit buffer DMA maps. |
210 | */ |
211 | for (i = 0; i < SF_NTXDESC; i++) { |
212 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
213 | SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, |
214 | &sc->sc_txsoft[i].ds_dmamap)) != 0) { |
215 | aprint_error_dev(sc->sc_dev, |
216 | "unable to create tx DMA map %d, error = %d\n" , i, |
217 | error); |
218 | goto fail_4; |
219 | } |
220 | } |
221 | |
222 | /* |
223 | * Create the receive buffer DMA maps. |
224 | */ |
225 | for (i = 0; i < SF_NRXDESC; i++) { |
226 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
227 | MCLBYTES, 0, BUS_DMA_NOWAIT, |
228 | &sc->sc_rxsoft[i].ds_dmamap)) != 0) { |
229 | aprint_error_dev(sc->sc_dev, |
230 | "unable to create rx DMA map %d, error = %d\n" , i, |
231 | error); |
232 | goto fail_5; |
233 | } |
234 | } |
235 | |
236 | /* |
237 | * Reset the chip to a known state. |
238 | */ |
239 | sf_reset(sc); |
240 | |
241 | /* |
242 | * Read the Ethernet address from the EEPROM. |
243 | */ |
244 | for (i = 0; i < ETHER_ADDR_LEN; i++) |
245 | enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i); |
246 | |
247 | printf("%s: Ethernet address %s\n" , device_xname(sc->sc_dev), |
248 | ether_sprintf(enaddr)); |
249 | |
250 | if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64) |
251 | printf("%s: 64-bit PCI slot detected\n" , |
252 | device_xname(sc->sc_dev)); |
253 | |
254 | /* |
255 | * Initialize our media structures and probe the MII. |
256 | */ |
257 | sc->sc_mii.mii_ifp = ifp; |
258 | sc->sc_mii.mii_readreg = sf_mii_read; |
259 | sc->sc_mii.mii_writereg = sf_mii_write; |
260 | sc->sc_mii.mii_statchg = sf_mii_statchg; |
261 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
262 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, |
263 | ether_mediastatus); |
264 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
265 | MII_OFFSET_ANY, 0); |
266 | if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { |
267 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); |
268 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); |
269 | } else |
270 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
271 | |
272 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
273 | ifp->if_softc = sc; |
274 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
275 | ifp->if_ioctl = sf_ioctl; |
276 | ifp->if_start = sf_start; |
277 | ifp->if_watchdog = sf_watchdog; |
278 | ifp->if_init = sf_init; |
279 | ifp->if_stop = sf_stop; |
280 | IFQ_SET_READY(&ifp->if_snd); |
281 | |
282 | /* |
283 | * Attach the interface. |
284 | */ |
285 | if_attach(ifp); |
286 | ether_ifattach(ifp, enaddr); |
287 | |
288 | /* |
289 | * Make sure the interface is shutdown during reboot. |
290 | */ |
291 | if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown)) |
292 | pmf_class_network_register(sc->sc_dev, ifp); |
293 | else |
294 | aprint_error_dev(sc->sc_dev, |
295 | "couldn't establish power handler\n" ); |
296 | return; |
297 | |
298 | /* |
299 | * Free any resources we've allocated during the failed attach |
 * attempt.  Do this in reverse order and fall through.
301 | */ |
302 | fail_5: |
303 | for (i = 0; i < SF_NRXDESC; i++) { |
304 | if (sc->sc_rxsoft[i].ds_dmamap != NULL) |
305 | bus_dmamap_destroy(sc->sc_dmat, |
306 | sc->sc_rxsoft[i].ds_dmamap); |
307 | } |
308 | fail_4: |
309 | for (i = 0; i < SF_NTXDESC; i++) { |
310 | if (sc->sc_txsoft[i].ds_dmamap != NULL) |
311 | bus_dmamap_destroy(sc->sc_dmat, |
312 | sc->sc_txsoft[i].ds_dmamap); |
313 | } |
314 | bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); |
315 | fail_3: |
316 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); |
317 | fail_2: |
318 | bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data, |
319 | sizeof(struct sf_control_data)); |
320 | fail_1: |
321 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
322 | fail_0: |
323 | return; |
324 | } |
325 | |
326 | /* |
327 | * sf_shutdown: |
328 | * |
329 | * Shutdown hook -- make sure the interface is stopped at reboot. |
330 | */ |
331 | static bool |
332 | sf_shutdown(device_t self, int howto) |
333 | { |
334 | struct sf_softc *sc; |
335 | |
336 | sc = device_private(self); |
337 | sf_stop(&sc->sc_ethercom.ec_if, 1); |
338 | |
339 | return true; |
340 | } |
341 | |
342 | /* |
343 | * sf_start: [ifnet interface function] |
344 | * |
345 | * Start packet transmission on the interface. |
346 | */ |
347 | static void |
348 | sf_start(struct ifnet *ifp) |
349 | { |
350 | struct sf_softc *sc = ifp->if_softc; |
351 | struct mbuf *m0, *m; |
352 | struct sf_txdesc0 *txd; |
353 | struct sf_descsoft *ds; |
354 | bus_dmamap_t dmamap; |
355 | int error, producer, last = -1, opending, seg; |
356 | |
357 | /* |
358 | * Remember the previous number of pending transmits. |
359 | */ |
360 | opending = sc->sc_txpending; |
361 | |
362 | /* |
363 | * Find out where we're sitting. |
364 | */ |
365 | producer = SF_TXDINDEX_TO_HOST( |
366 | TDQPI_HiPrTxProducerIndex_get( |
367 | sf_funcreg_read(sc, SF_TxDescQueueProducerIndex))); |
368 | |
369 | /* |
370 | * Loop through the send queue, setting up transmit descriptors |
371 | * until we drain the queue, or use up all available transmit |
372 | * descriptors. Leave a blank one at the end for sanity's sake. |
373 | */ |
374 | while (sc->sc_txpending < (SF_NTXDESC - 1)) { |
375 | /* |
376 | * Grab a packet off the queue. |
377 | */ |
378 | IFQ_POLL(&ifp->if_snd, m0); |
379 | if (m0 == NULL) |
380 | break; |
381 | m = NULL; |
382 | |
383 | /* |
384 | * Get the transmit descriptor. |
385 | */ |
386 | txd = &sc->sc_txdescs[producer]; |
387 | ds = &sc->sc_txsoft[producer]; |
388 | dmamap = ds->ds_dmamap; |
389 | |
390 | /* |
391 | * Load the DMA map. If this fails, the packet either |
392 | * didn't fit in the allotted number of frags, or we were |
393 | * short on resources. In this case, we'll copy and try |
394 | * again. |
395 | */ |
396 | if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
397 | BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { |
398 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
399 | if (m == NULL) { |
400 | aprint_error_dev(sc->sc_dev, |
401 | "unable to allocate Tx mbuf\n" ); |
402 | break; |
403 | } |
404 | if (m0->m_pkthdr.len > MHLEN) { |
405 | MCLGET(m, M_DONTWAIT); |
406 | if ((m->m_flags & M_EXT) == 0) { |
407 | aprint_error_dev(sc->sc_dev, |
408 | "unable to allocate Tx cluster\n" ); |
409 | m_freem(m); |
410 | break; |
411 | } |
412 | } |
413 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); |
414 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; |
415 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, |
416 | m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				m_freem(m);
				break;
			}
423 | } |
424 | |
425 | /* |
426 | * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. |
427 | */ |
428 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
429 | if (m != NULL) { |
430 | m_freem(m0); |
431 | m0 = m; |
432 | } |
433 | |
434 | /* Initialize the descriptor. */ |
435 | txd->td_word0 = |
436 | htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len); |
437 | if (producer == (SF_NTXDESC - 1)) |
			txd->td_word0 |= htole32(TD_W0_END);
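		/* Word 1 carries the fragment count; the fragment list follows. */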
439 | txd->td_word1 = htole32(dmamap->dm_nsegs); |
440 | for (seg = 0; seg < dmamap->dm_nsegs; seg++) { |
441 | txd->td_frags[seg].fr_addr = |
442 | htole32(dmamap->dm_segs[seg].ds_addr); |
443 | txd->td_frags[seg].fr_len = |
444 | htole32(dmamap->dm_segs[seg].ds_len); |
445 | } |
446 | |
447 | /* Sync the descriptor and the DMA map. */ |
448 | SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE); |
449 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
450 | BUS_DMASYNC_PREWRITE); |
451 | |
452 | /* |
453 | * Store a pointer to the packet so we can free it later. |
454 | */ |
455 | ds->ds_mbuf = m0; |
456 | |
457 | /* Advance the Tx pointer. */ |
458 | sc->sc_txpending++; |
459 | last = producer; |
460 | producer = SF_NEXTTX(producer); |
461 | |
462 | /* |
463 | * Pass the packet to any BPF listeners. |
464 | */ |
465 | bpf_mtap(ifp, m0); |
466 | } |
467 | |
468 | if (sc->sc_txpending == (SF_NTXDESC - 1)) { |
469 | /* No more slots left; notify upper layer. */ |
470 | ifp->if_flags |= IFF_OACTIVE; |
471 | } |
472 | |
473 | if (sc->sc_txpending != opending) { |
474 | KASSERT(last != -1); |
475 | /* |
476 | * We enqueued packets. Cause a transmit interrupt to |
477 | * happen on the last packet we enqueued, and give the |
478 | * new descriptors to the chip by writing the new |
479 | * producer index. |
480 | */ |
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
482 | SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE); |
483 | |
484 | sf_funcreg_write(sc, SF_TxDescQueueProducerIndex, |
485 | TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer))); |
486 | |
487 | /* Set a watchdog timer in case the chip flakes out. */ |
488 | ifp->if_timer = 5; |
489 | } |
490 | } |
491 | |
492 | /* |
493 | * sf_watchdog: [ifnet interface function] |
494 | * |
495 | * Watchdog timer handler. |
496 | */ |
497 | static void |
498 | sf_watchdog(struct ifnet *ifp) |
499 | { |
500 | struct sf_softc *sc = ifp->if_softc; |
501 | |
502 | printf("%s: device timeout\n" , device_xname(sc->sc_dev)); |
503 | ifp->if_oerrors++; |
504 | |
505 | (void) sf_init(ifp); |
506 | |
507 | /* Try to get more packets going. */ |
508 | sf_start(ifp); |
509 | } |
510 | |
511 | /* |
512 | * sf_ioctl: [ifnet interface function] |
513 | * |
514 | * Handle control requests from the operator. |
515 | */ |
516 | static int |
517 | sf_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
518 | { |
519 | struct sf_softc *sc = ifp->if_softc; |
520 | int s, error; |
521 | |
522 | s = splnet(); |
523 | |
524 | error = ether_ioctl(ifp, cmd, data); |
525 | if (error == ENETRESET) { |
526 | /* |
527 | * Multicast list has changed; set the hardware filter |
528 | * accordingly. |
529 | */ |
530 | if (ifp->if_flags & IFF_RUNNING) |
531 | sf_set_filter(sc); |
532 | error = 0; |
533 | } |
534 | |
535 | /* Try to get more packets going. */ |
536 | sf_start(ifp); |
537 | |
538 | splx(s); |
539 | return (error); |
540 | } |
541 | |
542 | /* |
543 | * sf_intr: |
544 | * |
545 | * Interrupt service routine. |
546 | */ |
547 | int |
548 | sf_intr(void *arg) |
549 | { |
550 | struct sf_softc *sc = arg; |
551 | uint32_t isr; |
552 | int handled = 0, wantinit = 0; |
553 | |
554 | for (;;) { |
555 | /* Reading clears all interrupts we're interested in. */ |
556 | isr = sf_funcreg_read(sc, SF_InterruptStatus); |
557 | if ((isr & IS_PCIPadInt) == 0) |
558 | break; |
559 | |
560 | handled = 1; |
561 | |
562 | /* Handle receive interrupts. */ |
563 | if (isr & IS_RxQ1DoneInt) |
564 | sf_rxintr(sc); |
565 | |
566 | /* Handle transmit completion interrupts. */ |
567 | if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt)) |
568 | sf_txintr(sc); |
569 | |
570 | /* Handle abnormal interrupts. */ |
571 | if (isr & IS_AbnormalInterrupt) { |
572 | /* Statistics. */ |
573 | if (isr & IS_StatisticWrapInt) |
574 | sf_stats_update(sc); |
575 | |
576 | /* DMA errors. */ |
577 | if (isr & IS_DmaErrInt) { |
578 | wantinit = 1; |
579 | aprint_error_dev(sc->sc_dev, |
580 | "WARNING: DMA error\n" ); |
581 | } |
582 | |
583 | /* Transmit FIFO underruns. */ |
584 | if (isr & IS_TxDataLowInt) { |
585 | if (sc->sc_txthresh < 0xff) |
586 | sc->sc_txthresh++; |
587 | printf("%s: transmit FIFO underrun, new " |
588 | "threshold: %d bytes\n" , |
589 | device_xname(sc->sc_dev), |
590 | sc->sc_txthresh * 16); |
591 | sf_funcreg_write(sc, SF_TransmitFrameCSR, |
592 | sc->sc_TransmitFrameCSR | |
593 | TFCSR_TransmitThreshold(sc->sc_txthresh)); |
594 | sf_funcreg_write(sc, SF_TxDescQueueCtrl, |
595 | sc->sc_TxDescQueueCtrl | |
596 | TDQC_TxHighPriorityFifoThreshold( |
597 | sc->sc_txthresh)); |
598 | } |
599 | } |
600 | } |
601 | |
602 | if (handled) { |
603 | /* Reset the interface, if necessary. */ |
604 | if (wantinit) |
605 | sf_init(&sc->sc_ethercom.ec_if); |
606 | |
		/* Try to get more packets going. */
608 | sf_start(&sc->sc_ethercom.ec_if); |
609 | } |
610 | |
611 | return (handled); |
612 | } |
613 | |
614 | /* |
615 | * sf_txintr: |
616 | * |
617 | * Helper -- handle transmit completion interrupts. |
618 | */ |
619 | static void |
620 | sf_txintr(struct sf_softc *sc) |
621 | { |
622 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
623 | struct sf_descsoft *ds; |
624 | uint32_t cqci, tcd; |
625 | int consumer, producer, txidx; |
626 | |
627 | try_again: |
628 | cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex); |
629 | |
630 | consumer = CQCI_TxCompletionConsumerIndex_get(cqci); |
631 | producer = CQPI_TxCompletionProducerIndex_get( |
632 | sf_funcreg_read(sc, SF_CompletionQueueProducerIndex)); |
633 | |
634 | if (consumer == producer) |
635 | return; |
636 | |
637 | ifp->if_flags &= ~IFF_OACTIVE; |
638 | |
639 | while (consumer != producer) { |
640 | SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD); |
641 | tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0); |
642 | |
643 | txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd)); |
644 | #ifdef DIAGNOSTIC |
645 | if ((tcd & TCD_PR) == 0) |
646 | aprint_error_dev(sc->sc_dev, |
647 | "Tx queue mismatch, index %d\n" , txidx); |
648 | #endif |
649 | /* |
650 | * NOTE: stats are updated later. We're just |
651 | * releasing packets that have been DMA'd to |
652 | * the chip. |
653 | */ |
654 | ds = &sc->sc_txsoft[txidx]; |
655 | SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE); |
656 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, |
657 | 0, ds->ds_dmamap->dm_mapsize, |
658 | BUS_DMASYNC_POSTWRITE); |
659 | m_freem(ds->ds_mbuf); |
660 | ds->ds_mbuf = NULL; |
661 | |
662 | consumer = SF_NEXTTCD(consumer); |
663 | sc->sc_txpending--; |
664 | } |
665 | |
666 | /* XXXJRT -- should be KDASSERT() */ |
667 | KASSERT(sc->sc_txpending >= 0); |
668 | |
669 | /* If all packets are done, cancel the watchdog timer. */ |
670 | if (sc->sc_txpending == 0) |
671 | ifp->if_timer = 0; |
672 | |
673 | /* Update the consumer index. */ |
674 | sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex, |
675 | (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) | |
676 | CQCI_TxCompletionConsumerIndex(consumer)); |
677 | |
678 | /* Double check for new completions. */ |
679 | goto try_again; |
680 | } |
681 | |
682 | /* |
683 | * sf_rxintr: |
684 | * |
685 | * Helper -- handle receive interrupts. |
686 | */ |
687 | static void |
688 | sf_rxintr(struct sf_softc *sc) |
689 | { |
690 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
691 | struct sf_descsoft *ds; |
692 | struct sf_rcd_full *rcd; |
693 | struct mbuf *m; |
694 | uint32_t cqci, word0; |
695 | int consumer, producer, bufproducer, rxidx, len; |
696 | |
697 | try_again: |
698 | cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex); |
699 | |
700 | consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci); |
701 | producer = CQPI_RxCompletionQ1ProducerIndex_get( |
702 | sf_funcreg_read(sc, SF_CompletionQueueProducerIndex)); |
703 | bufproducer = RXQ1P_RxDescQ1Producer_get( |
704 | sf_funcreg_read(sc, SF_RxDescQueue1Ptrs)); |
705 | |
706 | if (consumer == producer) |
707 | return; |
708 | |
709 | while (consumer != producer) { |
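		/*
		 * Sync the completion entry so that the chip's update is
		 * visible, then hand it straight back; only word0 is
		 * consumed here.
		 */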
710 | rcd = &sc->sc_rxcomp[consumer]; |
711 | SF_CDRXCSYNC(sc, consumer, |
712 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
713 | SF_CDRXCSYNC(sc, consumer, |
714 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
715 | |
716 | word0 = le32toh(rcd->rcd_word0); |
717 | rxidx = RCD_W0_EndIndex(word0); |
718 | |
719 | ds = &sc->sc_rxsoft[rxidx]; |
720 | |
721 | consumer = SF_NEXTRCD(consumer); |
722 | bufproducer = SF_NEXTRX(bufproducer); |
723 | |
724 | if ((word0 & RCD_W0_OK) == 0) { |
725 | SF_INIT_RXDESC(sc, rxidx); |
726 | continue; |
727 | } |
728 | |
729 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, |
730 | ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
731 | |
732 | /* |
733 | * No errors; receive the packet. Note that we have |
734 | * configured the Starfire to NOT transfer the CRC |
735 | * with the packet. |
736 | */ |
737 | len = RCD_W0_Length(word0); |
738 | |
739 | #ifdef __NO_STRICT_ALIGNMENT |
740 | /* |
741 | * Allocate a new mbuf cluster. If that fails, we are |
742 | * out of memory, and must drop the packet and recycle |
743 | * the buffer that's already attached to this descriptor. |
744 | */ |
745 | m = ds->ds_mbuf; |
746 | if (sf_add_rxbuf(sc, rxidx) != 0) { |
747 | ifp->if_ierrors++; |
748 | SF_INIT_RXDESC(sc, rxidx); |
749 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, |
750 | ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
751 | continue; |
752 | } |
753 | #else |
754 | /* |
755 | * The Starfire's receive buffer must be 4-byte aligned. |
756 | * But this means that the data after the Ethernet header |
757 | * is misaligned. We must allocate a new buffer and |
758 | * copy the data, shifted forward 2 bytes. |
759 | */ |
760 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
761 | if (m == NULL) { |
762 | dropit: |
763 | ifp->if_ierrors++; |
764 | SF_INIT_RXDESC(sc, rxidx); |
765 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, |
766 | ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
767 | continue; |
768 | } |
769 | if (len > (MHLEN - 2)) { |
770 | MCLGET(m, M_DONTWAIT); |
771 | if ((m->m_flags & M_EXT) == 0) { |
772 | m_freem(m); |
773 | goto dropit; |
774 | } |
775 | } |
776 | m->m_data += 2; |
777 | |
778 | /* |
779 | * Note that we use cluster for incoming frames, so the |
780 | * buffer is virtually contiguous. |
781 | */ |
782 | memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len); |
783 | |
784 | /* Allow the receive descriptor to continue using its mbuf. */ |
785 | SF_INIT_RXDESC(sc, rxidx); |
786 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, |
787 | ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
788 | #endif /* __NO_STRICT_ALIGNMENT */ |
789 | |
790 | m_set_rcvif(m, ifp); |
791 | m->m_pkthdr.len = m->m_len = len; |
792 | |
793 | /* |
794 | * Pass this up to any BPF listeners. |
795 | */ |
796 | bpf_mtap(ifp, m); |
797 | |
798 | /* Pass it on. */ |
799 | if_percpuq_enqueue(ifp->if_percpuq, m); |
800 | } |
801 | |
802 | /* Update the chip's pointers. */ |
803 | sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex, |
804 | (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) | |
805 | CQCI_RxCompletionQ1ConsumerIndex(consumer)); |
806 | sf_funcreg_write(sc, SF_RxDescQueue1Ptrs, |
807 | RXQ1P_RxDescQ1Producer(bufproducer)); |
808 | |
809 | /* Double-check for any new completions. */ |
810 | goto try_again; |
811 | } |
812 | |
813 | /* |
814 | * sf_tick: |
815 | * |
816 | * One second timer, used to tick the MII and update stats. |
817 | */ |
818 | static void |
819 | sf_tick(void *arg) |
820 | { |
821 | struct sf_softc *sc = arg; |
822 | int s; |
823 | |
824 | s = splnet(); |
825 | mii_tick(&sc->sc_mii); |
826 | sf_stats_update(sc); |
827 | splx(s); |
828 | |
829 | callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc); |
830 | } |
831 | |
832 | /* |
833 | * sf_stats_update: |
834 | * |
835 | * Read the statitistics counters. |
836 | */ |
837 | static void |
838 | sf_stats_update(struct sf_softc *sc) |
839 | { |
840 | struct sf_stats stats; |
841 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
842 | uint32_t *p; |
843 | u_int i; |
844 | |
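	/*
	 * Copy out each counter and then clear it, so that each
	 * update accumulates only the events since the last one.
	 */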
845 | p = &stats.TransmitOKFrames; |
846 | for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) { |
847 | *p++ = sf_genreg_read(sc, |
848 | SF_STATS_BASE + (i * sizeof(uint32_t))); |
849 | sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0); |
850 | } |
851 | |
852 | ifp->if_opackets += stats.TransmitOKFrames; |
853 | |
854 | ifp->if_collisions += stats.SingleCollisionFrames + |
855 | stats.MultipleCollisionFrames; |
856 | |
857 | ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions + |
858 | stats.TransmitAbortDueToExcessingDeferral + |
859 | stats.FramesLostDueToInternalTransmitErrors; |
860 | |
861 | ifp->if_ipackets += stats.ReceiveOKFrames; |
862 | |
863 | ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors + |
864 | stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort + |
865 | stats.ReceiveFramesJabbersError + |
866 | stats.FramesLostDueToInternalReceiveErrors; |
867 | } |
868 | |
869 | /* |
870 | * sf_reset: |
871 | * |
872 | * Perform a soft reset on the Starfire. |
873 | */ |
874 | static void |
875 | sf_reset(struct sf_softc *sc) |
876 | { |
877 | int i; |
878 | |
879 | sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0); |
880 | |
881 | sf_macreset(sc); |
882 | |
883 | sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset); |
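	/* Wait up to 10ms (1000 * 10us) for the self-clearing reset bit. */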
884 | for (i = 0; i < 1000; i++) { |
885 | delay(10); |
886 | if ((sf_funcreg_read(sc, SF_PciDeviceConfig) & |
887 | PDC_SoftReset) == 0) |
888 | break; |
889 | } |
890 | |
891 | if (i == 1000) { |
892 | aprint_error_dev(sc->sc_dev, "reset failed to complete\n" ); |
893 | sf_funcreg_write(sc, SF_PciDeviceConfig, 0); |
894 | } |
895 | |
896 | delay(1000); |
897 | } |
898 | |
899 | /* |
900 | * sf_macreset: |
901 | * |
902 | * Reset the MAC portion of the Starfire. |
903 | */ |
904 | static void |
905 | sf_macreset(struct sf_softc *sc) |
906 | { |
907 | |
908 | sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst); |
909 | delay(1000); |
910 | sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1); |
911 | } |
912 | |
913 | /* |
914 | * sf_init: [ifnet interface function] |
915 | * |
916 | * Initialize the interface. Must be called at splnet(). |
917 | */ |
918 | static int |
919 | sf_init(struct ifnet *ifp) |
920 | { |
921 | struct sf_softc *sc = ifp->if_softc; |
922 | struct sf_descsoft *ds; |
923 | int error = 0; |
924 | u_int i; |
925 | |
926 | /* |
927 | * Cancel any pending I/O. |
928 | */ |
929 | sf_stop(ifp, 0); |
930 | |
931 | /* |
932 | * Reset the Starfire to a known state. |
933 | */ |
934 | sf_reset(sc); |
935 | |
936 | /* Clear the stat counters. */ |
937 | for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t)) |
938 | sf_genreg_write(sc, SF_STATS_BASE + i, 0); |
939 | |
940 | /* |
941 | * Initialize the transmit descriptor ring. |
942 | */ |
943 | memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); |
944 | sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0); |
945 | sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0)); |
946 | sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0); |
947 | |
948 | /* |
949 | * Initialize the transmit completion ring. |
950 | */ |
951 | for (i = 0; i < SF_NTCD; i++) { |
		sc->sc_txcomp[i].tcd_word0 = htole32(TCD_DMA_ID);
953 | SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
954 | } |
955 | sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0); |
956 | sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0)); |
957 | |
958 | /* |
959 | * Initialize the receive descriptor ring. |
960 | */ |
961 | for (i = 0; i < SF_NRXDESC; i++) { |
962 | ds = &sc->sc_rxsoft[i]; |
963 | if (ds->ds_mbuf == NULL) { |
964 | if ((error = sf_add_rxbuf(sc, i)) != 0) { |
965 | aprint_error_dev(sc->sc_dev, |
966 | "unable to allocate or map rx buffer %d, " |
967 | "error = %d\n" , i, error); |
968 | /* |
969 | * XXX Should attempt to run with fewer receive |
970 | * XXX buffers instead of just failing. |
971 | */ |
972 | sf_rxdrain(sc); |
973 | goto out; |
974 | } |
975 | } else |
976 | SF_INIT_RXDESC(sc, i); |
977 | } |
978 | sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0); |
979 | sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0)); |
980 | sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0); |
981 | |
982 | /* |
983 | * Initialize the receive completion ring. |
984 | */ |
985 | for (i = 0; i < SF_NRCD; i++) { |
		sc->sc_rxcomp[i].rcd_word0 = htole32(RCD_W0_ID);
987 | sc->sc_rxcomp[i].rcd_word1 = 0; |
988 | sc->sc_rxcomp[i].rcd_word2 = 0; |
989 | sc->sc_rxcomp[i].rcd_timestamp = 0; |
990 | SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
991 | } |
992 | sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) | |
993 | RCQ1C_RxCompletionQ1Type(3)); |
994 | sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0); |
995 | |
996 | /* |
997 | * Initialize the Tx CSR. |
998 | */ |
999 | sc->sc_TransmitFrameCSR = 0; |
1000 | sf_funcreg_write(sc, SF_TransmitFrameCSR, |
1001 | sc->sc_TransmitFrameCSR | |
1002 | TFCSR_TransmitThreshold(sc->sc_txthresh)); |
1003 | |
1004 | /* |
1005 | * Initialize the Tx descriptor control register. |
1006 | */ |
1007 | sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) | |
1008 | TDQC_TxDmaBurstSize(4) | /* default */ |
1009 | TDQC_MinFrameSpacing(3) | /* 128 bytes */ |
1010 | TDQC_TxDescType(0); |
1011 | sf_funcreg_write(sc, SF_TxDescQueueCtrl, |
1012 | sc->sc_TxDescQueueCtrl | |
1013 | TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh)); |
1014 | |
1015 | /* |
1016 | * Initialize the Rx descriptor control registers. |
1017 | */ |
1018 | sf_funcreg_write(sc, SF_RxDescQueue1Ctrl, |
1019 | RDQ1C_RxQ1BufferLength(MCLBYTES) | |
1020 | RDQ1C_RxDescSpacing(0)); |
1021 | sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0); |
1022 | |
1023 | /* |
1024 | * Initialize the Tx descriptor producer indices. |
1025 | */ |
1026 | sf_funcreg_write(sc, SF_TxDescQueueProducerIndex, |
1027 | TDQPI_HiPrTxProducerIndex(0) | |
1028 | TDQPI_LoPrTxProducerIndex(0)); |
1029 | |
1030 | /* |
1031 | * Initialize the Rx descriptor producer indices. |
1032 | */ |
1033 | sf_funcreg_write(sc, SF_RxDescQueue1Ptrs, |
1034 | RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1)); |
1035 | sf_funcreg_write(sc, SF_RxDescQueue2Ptrs, |
1036 | RXQ2P_RxDescQ2Producer(0)); |
1037 | |
1038 | /* |
1039 | * Initialize the Tx and Rx completion queue consumer indices. |
1040 | */ |
1041 | sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex, |
1042 | CQCI_TxCompletionConsumerIndex(0) | |
1043 | CQCI_RxCompletionQ1ConsumerIndex(0)); |
1044 | sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0); |
1045 | |
1046 | /* |
1047 | * Initialize the Rx DMA control register. |
1048 | */ |
1049 | sf_funcreg_write(sc, SF_RxDmaCtrl, |
1050 | RDC_RxHighPriorityThreshold(6) | /* default */ |
1051 | RDC_RxBurstSize(4)); /* default */ |
1052 | |
1053 | /* |
1054 | * Set the receive filter. |
1055 | */ |
1056 | sc->sc_RxAddressFilteringCtl = 0; |
1057 | sf_set_filter(sc); |
1058 | |
1059 | /* |
1060 | * Set MacConfig1. When we set the media, MacConfig1 will |
1061 | * actually be written and the MAC part reset. |
1062 | */ |
1063 | sc->sc_MacConfig1 = MC1_PadEn; |
1064 | |
1065 | /* |
1066 | * Set the media. |
1067 | */ |
1068 | if ((error = ether_mediachange(ifp)) != 0) |
1069 | goto out; |
1070 | |
1071 | /* |
1072 | * Initialize the interrupt register. |
1073 | */ |
1074 | sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt | |
1075 | IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt | |
1076 | IS_StatisticWrapInt; |
1077 | sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn); |
1078 | |
1079 | sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable | |
1080 | PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT)); |
1081 | |
1082 | /* |
1083 | * Start the transmit and receive processes. |
1084 | */ |
1085 | sf_funcreg_write(sc, SF_GeneralEthernetCtrl, |
1086 | GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn); |
1087 | |
	/* Start the one second clock. */
1089 | callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc); |
1090 | |
1091 | /* |
1092 | * Note that the interface is now running. |
1093 | */ |
1094 | ifp->if_flags |= IFF_RUNNING; |
1095 | ifp->if_flags &= ~IFF_OACTIVE; |
1096 | |
1097 | out: |
1098 | if (error) { |
1099 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1100 | ifp->if_timer = 0; |
1101 | printf("%s: interface not running\n" , device_xname(sc->sc_dev)); |
1102 | } |
1103 | return (error); |
1104 | } |
1105 | |
1106 | /* |
1107 | * sf_rxdrain: |
1108 | * |
1109 | * Drain the receive queue. |
1110 | */ |
1111 | static void |
1112 | sf_rxdrain(struct sf_softc *sc) |
1113 | { |
1114 | struct sf_descsoft *ds; |
1115 | int i; |
1116 | |
1117 | for (i = 0; i < SF_NRXDESC; i++) { |
1118 | ds = &sc->sc_rxsoft[i]; |
1119 | if (ds->ds_mbuf != NULL) { |
1120 | bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); |
1121 | m_freem(ds->ds_mbuf); |
1122 | ds->ds_mbuf = NULL; |
1123 | } |
1124 | } |
1125 | } |
1126 | |
1127 | /* |
1128 | * sf_stop: [ifnet interface function] |
1129 | * |
1130 | * Stop transmission on the interface. |
1131 | */ |
1132 | static void |
1133 | sf_stop(struct ifnet *ifp, int disable) |
1134 | { |
1135 | struct sf_softc *sc = ifp->if_softc; |
1136 | struct sf_descsoft *ds; |
1137 | int i; |
1138 | |
1139 | /* Stop the one second clock. */ |
1140 | callout_stop(&sc->sc_tick_callout); |
1141 | |
1142 | /* Down the MII. */ |
1143 | mii_down(&sc->sc_mii); |
1144 | |
1145 | /* Disable interrupts. */ |
1146 | sf_funcreg_write(sc, SF_InterruptEn, 0); |
1147 | |
1148 | /* Stop the transmit and receive processes. */ |
1149 | sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0); |
1150 | |
1151 | /* |
1152 | * Release any queued transmit buffers. |
1153 | */ |
1154 | for (i = 0; i < SF_NTXDESC; i++) { |
1155 | ds = &sc->sc_txsoft[i]; |
1156 | if (ds->ds_mbuf != NULL) { |
1157 | bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); |
1158 | m_freem(ds->ds_mbuf); |
1159 | ds->ds_mbuf = NULL; |
1160 | } |
1161 | } |
1162 | |
1163 | /* |
1164 | * Mark the interface down and cancel the watchdog timer. |
1165 | */ |
1166 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1167 | ifp->if_timer = 0; |
1168 | |
1169 | if (disable) |
1170 | sf_rxdrain(sc); |
1171 | } |
1172 | |
1173 | /* |
1174 | * sf_read_eeprom: |
1175 | * |
1176 | * Read from the Starfire EEPROM. |
1177 | */ |
1178 | static uint8_t |
1179 | sf_read_eeprom(struct sf_softc *sc, int offset) |
1180 | { |
1181 | uint32_t reg; |
1182 | |
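	/* The EEPROM is read 32 bits at a time; extract the wanted byte. */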
1183 | reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3)); |
1184 | |
1185 | return ((reg >> (8 * (offset & 3))) & 0xff); |
1186 | } |
1187 | |
1188 | /* |
1189 | * sf_add_rxbuf: |
1190 | * |
1191 | * Add a receive buffer to the indicated descriptor. |
1192 | */ |
1193 | static int |
1194 | sf_add_rxbuf(struct sf_softc *sc, int idx) |
1195 | { |
1196 | struct sf_descsoft *ds = &sc->sc_rxsoft[idx]; |
1197 | struct mbuf *m; |
1198 | int error; |
1199 | |
1200 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1201 | if (m == NULL) |
1202 | return (ENOBUFS); |
1203 | |
1204 | MCLGET(m, M_DONTWAIT); |
1205 | if ((m->m_flags & M_EXT) == 0) { |
1206 | m_freem(m); |
1207 | return (ENOBUFS); |
1208 | } |
1209 | |
1210 | if (ds->ds_mbuf != NULL) |
1211 | bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); |
1212 | |
1213 | ds->ds_mbuf = m; |
1214 | |
1215 | error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, |
1216 | m->m_ext.ext_buf, m->m_ext.ext_size, NULL, |
1217 | BUS_DMA_READ|BUS_DMA_NOWAIT); |
1218 | if (error) { |
1219 | aprint_error_dev(sc->sc_dev, |
1220 | "can't load rx DMA map %d, error = %d\n" , idx, error); |
1221 | panic("sf_add_rxbuf" ); /* XXX */ |
1222 | } |
1223 | |
1224 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, |
1225 | ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
1226 | |
1227 | SF_INIT_RXDESC(sc, idx); |
1228 | |
1229 | return (0); |
1230 | } |
1231 | |
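/*
 * sf_set_filter_perfect:
 *
 *	Program one slot of the perfect-match filter table.  The
 *	address is written 16 bits at a time, last octet first.
 */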
1232 | static void |
1233 | sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr) |
1234 | { |
1235 | uint32_t reg0, reg1, reg2; |
1236 | |
1237 | reg0 = enaddr[5] | (enaddr[4] << 8); |
1238 | reg1 = enaddr[3] | (enaddr[2] << 8); |
1239 | reg2 = enaddr[1] | (enaddr[0] << 8); |
1240 | |
1241 | sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0); |
1242 | sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1); |
1243 | sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2); |
1244 | } |
1245 | |
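/*
 * sf_set_filter_hash:
 *
 *	Set the hash table bit for a multicast address.  The top 9
 *	bits of the big-endian CRC select one of 32 16-bit table
 *	entries and a bit within that entry.
 */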
1246 | static void |
1247 | sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr) |
1248 | { |
1249 | uint32_t hash, slot, reg; |
1250 | |
1251 | hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23; |
1252 | slot = hash >> 4; |
1253 | |
1254 | reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10)); |
1255 | reg |= 1 << (hash & 0xf); |
1256 | sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg); |
1257 | } |
1258 | |
1259 | /* |
1260 | * sf_set_filter: |
1261 | * |
1262 | * Set the Starfire receive filter. |
1263 | */ |
1264 | static void |
1265 | sf_set_filter(struct sf_softc *sc) |
1266 | { |
1267 | struct ethercom *ec = &sc->sc_ethercom; |
1268 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1269 | struct ether_multi *enm; |
1270 | struct ether_multistep step; |
1271 | int i; |
1272 | |
1273 | /* Start by clearing the perfect and hash tables. */ |
1274 | for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t)) |
1275 | sf_genreg_write(sc, SF_PERFECT_BASE + i, 0); |
1276 | |
1277 | for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t)) |
1278 | sf_genreg_write(sc, SF_HASH_BASE + i, 0); |
1279 | |
1280 | /* |
1281 | * Clear the perfect and hash mode bits. |
1282 | */ |
1283 | sc->sc_RxAddressFilteringCtl &= |
1284 | ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3)); |
1285 | |
1286 | if (ifp->if_flags & IFF_BROADCAST) |
1287 | sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast; |
1288 | else |
1289 | sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast; |
1290 | |
1291 | if (ifp->if_flags & IFF_PROMISC) { |
1292 | sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode; |
1293 | goto allmulti; |
1294 | } else |
1295 | sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode; |
1296 | |
1297 | /* |
1298 | * Set normal perfect filtering mode. |
1299 | */ |
1300 | sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1); |
1301 | |
1302 | /* |
1303 | * First, write the station address to the perfect filter |
1304 | * table. |
1305 | */ |
1306 | sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl)); |
1307 | |
1308 | /* |
1309 | * Now set the hash bits for each multicast address in our |
1310 | * list. |
1311 | */ |
1312 | ETHER_FIRST_MULTI(step, ec, enm); |
1313 | if (enm == NULL) |
1314 | goto done; |
1315 | while (enm != NULL) { |
1316 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
1317 | /* |
1318 | * We must listen to a range of multicast addresses. |
1319 | * For now, just accept all multicasts, rather than |
1320 | * trying to set only those filter bits needed to match |
1321 | * the range. (At this time, the only use of address |
1322 | * ranges is for IP multicast routing, for which the |
1323 | * range is big enough to require all bits set.) |
1324 | */ |
1325 | goto allmulti; |
1326 | } |
1327 | sf_set_filter_hash(sc, enm->enm_addrlo); |
1328 | ETHER_NEXT_MULTI(step, enm); |
1329 | } |
1330 | |
1331 | /* |
1332 | * Set "hash only multicast dest, match regardless of VLAN ID". |
1333 | */ |
1334 | sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2); |
1335 | goto done; |
1336 | |
1337 | allmulti: |
1338 | /* |
1339 | * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode. |
1340 | */ |
1341 | sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast; |
1342 | ifp->if_flags |= IFF_ALLMULTI; |
1343 | |
1344 | done: |
1345 | sf_funcreg_write(sc, SF_RxAddressFilteringCtl, |
1346 | sc->sc_RxAddressFilteringCtl); |
1347 | } |
1348 | |
1349 | /* |
1350 | * sf_mii_read: [mii interface function] |
1351 | * |
1352 | * Read from the MII. |
1353 | */ |
1354 | static int |
1355 | sf_mii_read(device_t self, int phy, int reg) |
1356 | { |
1357 | struct sf_softc *sc = device_private(self); |
1358 | uint32_t v; |
1359 | int i; |
1360 | |
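	/* Poll up to ~1ms for the data-valid bit. */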
1361 | for (i = 0; i < 1000; i++) { |
1362 | v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)); |
1363 | if (v & MiiDataValid) |
1364 | break; |
1365 | delay(1); |
1366 | } |
1367 | |
1368 | if ((v & MiiDataValid) == 0) |
1369 | return (0); |
1370 | |
1371 | if (MiiRegDataPort(v) == 0xffff) |
1372 | return (0); |
1373 | |
1374 | return (MiiRegDataPort(v)); |
1375 | } |
1376 | |
1377 | /* |
1378 | * sf_mii_write: [mii interface function] |
1379 | * |
1380 | * Write to the MII. |
1381 | */ |
1382 | static void |
1383 | sf_mii_write(device_t self, int phy, int reg, int val) |
1384 | { |
1385 | struct sf_softc *sc = device_private(self); |
1386 | int i; |
1387 | |
1388 | sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val); |
1389 | |
1390 | for (i = 0; i < 1000; i++) { |
1391 | if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) & |
1392 | MiiBusy) == 0) |
1393 | return; |
1394 | delay(1); |
1395 | } |
1396 | |
1397 | printf("%s: MII write timed out\n" , device_xname(sc->sc_dev)); |
1398 | } |
1399 | |
1400 | /* |
1401 | * sf_mii_statchg: [mii interface function] |
1402 | * |
1403 | * Callback from the PHY when the media changes. |
1404 | */ |
1405 | static void |
1406 | sf_mii_statchg(struct ifnet *ifp) |
1407 | { |
1408 | struct sf_softc *sc = ifp->if_softc; |
1409 | uint32_t ipg; |
1410 | |
1411 | if (sc->sc_mii.mii_media_active & IFM_FDX) { |
1412 | sc->sc_MacConfig1 |= MC1_FullDuplex; |
1413 | ipg = 0x15; |
1414 | } else { |
1415 | sc->sc_MacConfig1 &= ~MC1_FullDuplex; |
1416 | ipg = 0x11; |
1417 | } |
1418 | |
1419 | sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1); |
1420 | sf_macreset(sc); |
1421 | |
1422 | sf_genreg_write(sc, SF_BkToBkIPG, ipg); |
1423 | } |
1424 | |