1 | /* $NetBSD: smc83c170.c,v 1.83 2016/06/10 13:27:13 ozaki-r Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center. |
10 | * |
11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions |
13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. |
19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ |
32 | |
33 | /* |
34 | * Device driver for the Standard Microsystems Corp. 83C170 |
35 | * Ethernet PCI Integrated Controller (EPIC/100). |
36 | */ |
37 | |
38 | #include <sys/cdefs.h> |
39 | __KERNEL_RCSID(0, "$NetBSD: smc83c170.c,v 1.83 2016/06/10 13:27:13 ozaki-r Exp $" ); |
40 | |
41 | |
42 | #include <sys/param.h> |
43 | #include <sys/systm.h> |
44 | #include <sys/callout.h> |
45 | #include <sys/mbuf.h> |
46 | #include <sys/malloc.h> |
47 | #include <sys/kernel.h> |
48 | #include <sys/socket.h> |
49 | #include <sys/ioctl.h> |
50 | #include <sys/errno.h> |
51 | #include <sys/device.h> |
52 | |
53 | #include <net/if.h> |
54 | #include <net/if_dl.h> |
55 | #include <net/if_media.h> |
56 | #include <net/if_ether.h> |
57 | |
58 | #include <net/bpf.h> |
59 | |
60 | #include <sys/bus.h> |
61 | #include <sys/intr.h> |
62 | |
63 | #include <dev/mii/miivar.h> |
64 | #include <dev/mii/lxtphyreg.h> |
65 | |
66 | #include <dev/ic/smc83c170reg.h> |
67 | #include <dev/ic/smc83c170var.h> |
68 | |
/* ifnet interface functions (installed into ifp in epic_attach()). */
void	epic_start(struct ifnet *);
void	epic_watchdog(struct ifnet *);
int	epic_ioctl(struct ifnet *, u_long, void *);
int	epic_init(struct ifnet *);
void	epic_stop(struct ifnet *, int);

/* pmf(9) shutdown hook; makes sure the chip is quiesced at reboot. */
bool	epic_shutdown(device_t, int);

/* Internal helpers. */
void	epic_reset(struct epic_softc *);
void	epic_rxdrain(struct epic_softc *);
int	epic_add_rxbuf(struct epic_softc *, int);
void	epic_read_eeprom(struct epic_softc *, int, int, uint16_t *);
void	epic_set_mchash(struct epic_softc *);
void	epic_fixup_clock_source(struct epic_softc *);

/* MII bus accessors and one-second tick. */
int	epic_mii_read(device_t, int, int);
void	epic_mii_write(device_t, int, int, int);
int	epic_mii_wait(struct epic_softc *, uint32_t);
void	epic_tick(void *);

/* ifmedia callbacks. */
void	epic_statchg(struct ifnet *);
int	epic_mediachange(struct ifnet *);

/* Interrupt sources we service in epic_intr(). */
#define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
	INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

/*
 * Tunable: when nonzero, received packets that fit in a single mbuf
 * header are copied out of the DMA buffer instead of handing the
 * cluster up the stack (saves cluster memory under small-packet load).
 */
int	epic_copy_small = 0;

/* Minimum Ethernet payload length; short Tx frames are padded to this. */
#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
97 | |
98 | /* |
99 | * Attach an EPIC interface to the system. |
100 | */ |
101 | void |
102 | epic_attach(struct epic_softc *sc) |
103 | { |
104 | bus_space_tag_t st = sc->sc_st; |
105 | bus_space_handle_t sh = sc->sc_sh; |
106 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
107 | int rseg, error, miiflags; |
108 | u_int i; |
109 | bus_dma_segment_t seg; |
110 | uint8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1]; |
111 | uint16_t myea[ETHER_ADDR_LEN / 2], mydevname[6]; |
112 | char *nullbuf; |
113 | |
114 | callout_init(&sc->sc_mii_callout, 0); |
115 | |
116 | /* |
117 | * Allocate the control data structures, and create and load the |
118 | * DMA map for it. |
119 | */ |
120 | if ((error = bus_dmamem_alloc(sc->sc_dmat, |
121 | sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0, |
122 | &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { |
123 | aprint_error_dev(sc->sc_dev, |
124 | "unable to allocate control data, error = %d\n" , error); |
125 | goto fail_0; |
126 | } |
127 | |
128 | if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, |
129 | sizeof(struct epic_control_data) + ETHER_PAD_LEN, |
130 | (void **)&sc->sc_control_data, |
131 | BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { |
132 | aprint_error_dev(sc->sc_dev, |
133 | "unable to map control data, error = %d\n" , error); |
134 | goto fail_1; |
135 | } |
136 | nullbuf = |
137 | (char *)sc->sc_control_data + sizeof(struct epic_control_data); |
138 | memset(nullbuf, 0, ETHER_PAD_LEN); |
139 | |
140 | if ((error = bus_dmamap_create(sc->sc_dmat, |
141 | sizeof(struct epic_control_data), 1, |
142 | sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT, |
143 | &sc->sc_cddmamap)) != 0) { |
144 | aprint_error_dev(sc->sc_dev, |
145 | "unable to create control data DMA map, error = %d\n" , |
146 | error); |
147 | goto fail_2; |
148 | } |
149 | |
150 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, |
151 | sc->sc_control_data, sizeof(struct epic_control_data), NULL, |
152 | BUS_DMA_NOWAIT)) != 0) { |
153 | aprint_error_dev(sc->sc_dev, |
154 | "unable to load control data DMA map, error = %d\n" , |
155 | error); |
156 | goto fail_3; |
157 | } |
158 | |
159 | /* |
160 | * Create the transmit buffer DMA maps. |
161 | */ |
162 | for (i = 0; i < EPIC_NTXDESC; i++) { |
163 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
164 | EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, |
165 | &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) { |
166 | aprint_error_dev(sc->sc_dev, |
167 | "unable to create tx DMA map %d, error = %d\n" , |
168 | i, error); |
169 | goto fail_4; |
170 | } |
171 | } |
172 | |
173 | /* |
174 | * Create the receive buffer DMA maps. |
175 | */ |
176 | for (i = 0; i < EPIC_NRXDESC; i++) { |
177 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
178 | MCLBYTES, 0, BUS_DMA_NOWAIT, |
179 | &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) { |
180 | aprint_error_dev(sc->sc_dev, |
181 | "unable to create rx DMA map %d, error = %d\n" , |
182 | i, error); |
183 | goto fail_5; |
184 | } |
185 | EPIC_DSRX(sc, i)->ds_mbuf = NULL; |
186 | } |
187 | |
188 | /* |
189 | * create and map the pad buffer |
190 | */ |
191 | if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1, |
192 | ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) { |
193 | aprint_error_dev(sc->sc_dev, |
194 | "unable to create pad buffer DMA map, error = %d\n" , error); |
195 | goto fail_5; |
196 | } |
197 | |
198 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap, |
199 | nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) { |
200 | aprint_error_dev(sc->sc_dev, |
201 | "unable to load pad buffer DMA map, error = %d\n" , error); |
202 | goto fail_6; |
203 | } |
204 | bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN, |
205 | BUS_DMASYNC_PREWRITE); |
206 | |
207 | /* |
208 | * Bring the chip out of low-power mode and reset it to a known state. |
209 | */ |
210 | bus_space_write_4(st, sh, EPIC_GENCTL, 0); |
211 | epic_reset(sc); |
212 | |
213 | /* |
214 | * Read the Ethernet address from the EEPROM. |
215 | */ |
216 | epic_read_eeprom(sc, 0, __arraycount(myea), myea); |
217 | for (i = 0; i < __arraycount(myea); i++) { |
218 | enaddr[i * 2] = myea[i] & 0xff; |
219 | enaddr[i * 2 + 1] = myea[i] >> 8; |
220 | } |
221 | |
222 | /* |
223 | * ...and the device name. |
224 | */ |
225 | epic_read_eeprom(sc, 0x2c, __arraycount(mydevname), mydevname); |
226 | for (i = 0; i < __arraycount(mydevname); i++) { |
227 | devname[i * 2] = mydevname[i] & 0xff; |
228 | devname[i * 2 + 1] = mydevname[i] >> 8; |
229 | } |
230 | |
231 | devname[sizeof(mydevname)] = '\0'; |
232 | for (i = sizeof(mydevname) ; i > 0; i--) { |
233 | if (devname[i - 1] == ' ') |
234 | devname[i - 1] = '\0'; |
235 | else |
236 | break; |
237 | } |
238 | |
239 | aprint_normal_dev(sc->sc_dev, "%s, Ethernet address %s\n" , |
240 | devname, ether_sprintf(enaddr)); |
241 | |
242 | miiflags = 0; |
243 | if (sc->sc_hwflags & EPIC_HAS_MII_FIBER) |
244 | miiflags |= MIIF_HAVEFIBER; |
245 | |
246 | /* |
247 | * Initialize our media structures and probe the MII. |
248 | */ |
249 | sc->sc_mii.mii_ifp = ifp; |
250 | sc->sc_mii.mii_readreg = epic_mii_read; |
251 | sc->sc_mii.mii_writereg = epic_mii_write; |
252 | sc->sc_mii.mii_statchg = epic_statchg; |
253 | |
254 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
255 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange, |
256 | ether_mediastatus); |
257 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
258 | MII_OFFSET_ANY, miiflags); |
259 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { |
260 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); |
261 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); |
262 | } else |
263 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
264 | |
265 | if (sc->sc_hwflags & EPIC_HAS_BNC) { |
266 | /* use the next free media instance */ |
267 | sc->sc_serinst = sc->sc_mii.mii_instance++; |
268 | ifmedia_add(&sc->sc_mii.mii_media, |
269 | IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_serinst), |
270 | 0, NULL); |
271 | aprint_normal_dev(sc->sc_dev, "10base2/BNC\n" ); |
272 | } else |
273 | sc->sc_serinst = -1; |
274 | |
275 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
276 | ifp->if_softc = sc; |
277 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
278 | ifp->if_ioctl = epic_ioctl; |
279 | ifp->if_start = epic_start; |
280 | ifp->if_watchdog = epic_watchdog; |
281 | ifp->if_init = epic_init; |
282 | ifp->if_stop = epic_stop; |
283 | IFQ_SET_READY(&ifp->if_snd); |
284 | |
285 | /* |
286 | * We can support 802.1Q VLAN-sized frames. |
287 | */ |
288 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
289 | |
290 | /* |
291 | * Attach the interface. |
292 | */ |
293 | if_attach(ifp); |
294 | ether_ifattach(ifp, enaddr); |
295 | |
296 | /* |
297 | * Make sure the interface is shutdown during reboot. |
298 | */ |
299 | if (pmf_device_register1(sc->sc_dev, NULL, NULL, epic_shutdown)) |
300 | pmf_class_network_register(sc->sc_dev, ifp); |
301 | else |
302 | aprint_error_dev(sc->sc_dev, |
303 | "couldn't establish power handler\n" ); |
304 | |
305 | return; |
306 | |
307 | /* |
308 | * Free any resources we've allocated during the failed attach |
309 | * attempt. Do this in reverse order and fall through. |
310 | */ |
311 | fail_6: |
312 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap); |
313 | fail_5: |
314 | for (i = 0; i < EPIC_NRXDESC; i++) { |
315 | if (EPIC_DSRX(sc, i)->ds_dmamap != NULL) |
316 | bus_dmamap_destroy(sc->sc_dmat, |
317 | EPIC_DSRX(sc, i)->ds_dmamap); |
318 | } |
319 | fail_4: |
320 | for (i = 0; i < EPIC_NTXDESC; i++) { |
321 | if (EPIC_DSTX(sc, i)->ds_dmamap != NULL) |
322 | bus_dmamap_destroy(sc->sc_dmat, |
323 | EPIC_DSTX(sc, i)->ds_dmamap); |
324 | } |
325 | bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); |
326 | fail_3: |
327 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); |
328 | fail_2: |
329 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, |
330 | sizeof(struct epic_control_data)); |
331 | fail_1: |
332 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
333 | fail_0: |
334 | return; |
335 | } |
336 | |
337 | /* |
338 | * Shutdown hook. Make sure the interface is stopped at reboot. |
339 | */ |
340 | bool |
341 | epic_shutdown(device_t self, int howto) |
342 | { |
343 | struct epic_softc *sc = device_private(self); |
344 | |
345 | epic_stop(&sc->sc_ethercom.ec_if, 1); |
346 | |
347 | return true; |
348 | } |
349 | |
350 | /* |
351 | * Start packet transmission on the interface. |
352 | * [ifnet interface function] |
353 | */ |
354 | void |
355 | epic_start(struct ifnet *ifp) |
356 | { |
357 | struct epic_softc *sc = ifp->if_softc; |
358 | struct mbuf *m0, *m; |
359 | struct epic_txdesc *txd; |
360 | struct epic_descsoft *ds; |
361 | struct epic_fraglist *fr; |
362 | bus_dmamap_t dmamap; |
363 | int error, firsttx, nexttx, opending, seg; |
364 | u_int len; |
365 | |
366 | /* |
367 | * Remember the previous txpending and the first transmit |
368 | * descriptor we use. |
369 | */ |
370 | opending = sc->sc_txpending; |
371 | firsttx = EPIC_NEXTTX(sc->sc_txlast); |
372 | |
373 | /* |
374 | * Loop through the send queue, setting up transmit descriptors |
375 | * until we drain the queue, or use up all available transmit |
376 | * descriptors. |
377 | */ |
378 | while (sc->sc_txpending < EPIC_NTXDESC) { |
379 | /* |
380 | * Grab a packet off the queue. |
381 | */ |
382 | IFQ_POLL(&ifp->if_snd, m0); |
383 | if (m0 == NULL) |
384 | break; |
385 | m = NULL; |
386 | |
387 | /* |
388 | * Get the last and next available transmit descriptor. |
389 | */ |
390 | nexttx = EPIC_NEXTTX(sc->sc_txlast); |
391 | txd = EPIC_CDTX(sc, nexttx); |
392 | fr = EPIC_CDFL(sc, nexttx); |
393 | ds = EPIC_DSTX(sc, nexttx); |
394 | dmamap = ds->ds_dmamap; |
395 | |
396 | /* |
397 | * Load the DMA map. If this fails, the packet either |
398 | * didn't fit in the alloted number of frags, or we were |
399 | * short on resources. In this case, we'll copy and try |
400 | * again. |
401 | */ |
402 | if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
403 | BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 || |
404 | (m0->m_pkthdr.len < ETHER_PAD_LEN && |
405 | dmamap-> dm_nsegs == EPIC_NFRAGS)) { |
406 | if (error == 0) |
407 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
408 | |
409 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
410 | if (m == NULL) { |
411 | printf("%s: unable to allocate Tx mbuf\n" , |
412 | device_xname(sc->sc_dev)); |
413 | break; |
414 | } |
415 | if (m0->m_pkthdr.len > MHLEN) { |
416 | MCLGET(m, M_DONTWAIT); |
417 | if ((m->m_flags & M_EXT) == 0) { |
418 | printf("%s: unable to allocate Tx " |
419 | "cluster\n" , |
420 | device_xname(sc->sc_dev)); |
421 | m_freem(m); |
422 | break; |
423 | } |
424 | } |
425 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); |
426 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; |
427 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, |
428 | m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
429 | if (error) { |
430 | printf("%s: unable to load Tx buffer, " |
431 | "error = %d\n" , device_xname(sc->sc_dev), |
432 | error); |
433 | break; |
434 | } |
435 | } |
436 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
437 | if (m != NULL) { |
438 | m_freem(m0); |
439 | m0 = m; |
440 | } |
441 | |
442 | /* Initialize the fraglist. */ |
443 | for (seg = 0; seg < dmamap->dm_nsegs; seg++) { |
444 | fr->ef_frags[seg].ef_addr = |
445 | dmamap->dm_segs[seg].ds_addr; |
446 | fr->ef_frags[seg].ef_length = |
447 | dmamap->dm_segs[seg].ds_len; |
448 | } |
449 | len = m0->m_pkthdr.len; |
450 | if (len < ETHER_PAD_LEN) { |
451 | fr->ef_frags[seg].ef_addr = sc->sc_nulldma; |
452 | fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len; |
453 | len = ETHER_PAD_LEN; |
454 | seg++; |
455 | } |
456 | fr->ef_nfrags = seg; |
457 | |
458 | EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE); |
459 | |
460 | /* Sync the DMA map. */ |
461 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
462 | BUS_DMASYNC_PREWRITE); |
463 | |
464 | /* |
465 | * Store a pointer to the packet so we can free it later. |
466 | */ |
467 | ds->ds_mbuf = m0; |
468 | |
469 | /* |
470 | * Fill in the transmit descriptor. |
471 | */ |
472 | txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST; |
473 | |
474 | /* |
475 | * If this is the first descriptor we're enqueueing, |
476 | * don't give it to the EPIC yet. That could cause |
477 | * a race condition. We'll do it below. |
478 | */ |
479 | if (nexttx == firsttx) |
480 | txd->et_txstatus = TXSTAT_TXLENGTH(len); |
481 | else |
482 | txd->et_txstatus = |
483 | TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER; |
484 | |
485 | EPIC_CDTXSYNC(sc, nexttx, |
486 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
487 | |
488 | /* Advance the tx pointer. */ |
489 | sc->sc_txpending++; |
490 | sc->sc_txlast = nexttx; |
491 | |
492 | /* |
493 | * Pass the packet to any BPF listeners. |
494 | */ |
495 | bpf_mtap(ifp, m0); |
496 | } |
497 | |
498 | if (sc->sc_txpending == EPIC_NTXDESC) { |
499 | /* No more slots left; notify upper layer. */ |
500 | ifp->if_flags |= IFF_OACTIVE; |
501 | } |
502 | |
503 | if (sc->sc_txpending != opending) { |
504 | /* |
505 | * We enqueued packets. If the transmitter was idle, |
506 | * reset the txdirty pointer. |
507 | */ |
508 | if (opending == 0) |
509 | sc->sc_txdirty = firsttx; |
510 | |
511 | /* |
512 | * Cause a transmit interrupt to happen on the |
513 | * last packet we enqueued. |
514 | */ |
515 | EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF; |
516 | EPIC_CDTXSYNC(sc, sc->sc_txlast, |
517 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
518 | |
519 | /* |
520 | * The entire packet chain is set up. Give the |
521 | * first descriptor to the EPIC now. |
522 | */ |
523 | EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER; |
524 | EPIC_CDTXSYNC(sc, firsttx, |
525 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
526 | |
527 | /* Start the transmitter. */ |
528 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND, |
529 | COMMAND_TXQUEUED); |
530 | |
531 | /* Set a watchdog timer in case the chip flakes out. */ |
532 | ifp->if_timer = 5; |
533 | } |
534 | } |
535 | |
536 | /* |
537 | * Watchdog timer handler. |
538 | * [ifnet interface function] |
539 | */ |
540 | void |
541 | epic_watchdog(struct ifnet *ifp) |
542 | { |
543 | struct epic_softc *sc = ifp->if_softc; |
544 | |
545 | printf("%s: device timeout\n" , device_xname(sc->sc_dev)); |
546 | ifp->if_oerrors++; |
547 | |
548 | (void)epic_init(ifp); |
549 | } |
550 | |
551 | /* |
552 | * Handle control requests from the operator. |
553 | * [ifnet interface function] |
554 | */ |
555 | int |
556 | epic_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
557 | { |
558 | struct epic_softc *sc = ifp->if_softc; |
559 | int s, error; |
560 | |
561 | s = splnet(); |
562 | |
563 | error = ether_ioctl(ifp, cmd, data); |
564 | if (error == ENETRESET) { |
565 | /* |
566 | * Multicast list has changed; set the hardware filter |
567 | * accordingly. Update our idea of the current media; |
568 | * epic_set_mchash() needs to know what it is. |
569 | */ |
570 | if (ifp->if_flags & IFF_RUNNING) { |
571 | mii_pollstat(&sc->sc_mii); |
572 | epic_set_mchash(sc); |
573 | } |
574 | error = 0; |
575 | } |
576 | |
577 | splx(s); |
578 | return error; |
579 | } |
580 | |
581 | /* |
582 | * Interrupt handler. |
583 | */ |
/*
 * Interrupt handler.
 *
 * Services receive-complete, transmit-complete and fatal-error events,
 * looping (via "goto top") until the chip no longer asserts
 * INTSTAT_INT_ACTV.  Returns nonzero iff the interrupt was ours, so
 * shared-interrupt dispatch can continue down the chain otherwise.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	uint32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return claimed;

	claimed = 1;

	/*
	 * Acknowledge the interrupt.  INTSTAT is write-one-to-clear;
	 * we only clear the sources we actually service (INTMASK).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from our software pointer onward. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			/* See the device's view of the descriptor. */
			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    device_xname(sc->sc_dev));
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    device_xname(sc->sc_dev));
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			/* Make the received payload visible to the CPU. */
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, void *),
				    mtod(ds->ds_mbuf, void *), len);
				/* Recycle the DMA buffer in place. */
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				/* Hand the cluster up; attach a new one. */
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = m->m_len = len;

			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			bpf_mtap(ifp, m);

			/* Pass it on. */
			if_percpuq_enqueue(ifp->if_percpuq, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    device_xname(sc->sc_dev));
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		/* Slots are freeing up; clear the flow-control flag. */
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reclaim descriptors the chip has finished with. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			/* Still owned by the chip: stop reclaiming. */
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    device_xname(sc->sc_dev));
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n",
			    device_xname(sc->sc_dev));
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.  These indicate PCI bus trouble;
	 * log the specific cause and reinitialize the chip.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    device_xname(sc->sc_dev));
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    device_xname(sc->sc_dev));
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    device_xname(sc->sc_dev));
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    device_xname(sc->sc_dev));
		else
			printf("%s: unknown fatal error\n",
			    device_xname(sc->sc_dev));
		(void)epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
837 | |
838 | /* |
839 | * One second timer, used to tick the MII. |
840 | */ |
841 | void |
842 | epic_tick(void *arg) |
843 | { |
844 | struct epic_softc *sc = arg; |
845 | int s; |
846 | |
847 | s = splnet(); |
848 | mii_tick(&sc->sc_mii); |
849 | splx(s); |
850 | |
851 | callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc); |
852 | } |
853 | |
854 | /* |
855 | * Fixup the clock source on the EPIC. |
856 | */ |
857 | void |
858 | epic_fixup_clock_source(struct epic_softc *sc) |
859 | { |
860 | int i; |
861 | |
862 | /* |
863 | * According to SMC Application Note 7-15, the EPIC's clock |
864 | * source is incorrect following a reset. This manifests itself |
865 | * as failure to recognize when host software has written to |
866 | * a register on the EPIC. The appnote recommends issuing at |
867 | * least 16 consecutive writes to the CLOCK TEST bit to correctly |
868 | * configure the clock source. |
869 | */ |
870 | for (i = 0; i < 16; i++) |
871 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST, |
872 | TEST_CLOCKTEST); |
873 | } |
874 | |
875 | /* |
876 | * Perform a soft reset on the EPIC. |
877 | */ |
878 | void |
879 | epic_reset(struct epic_softc *sc) |
880 | { |
881 | |
882 | epic_fixup_clock_source(sc); |
883 | |
884 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0); |
885 | delay(100); |
886 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET); |
887 | delay(100); |
888 | |
889 | epic_fixup_clock_source(sc); |
890 | } |
891 | |
892 | /* |
893 | * Initialize the interface. Must be called at splnet(). |
894 | */ |
/*
 * Initialize the interface.  Must be called at splnet().
 * [ifnet interface function]
 *
 * Resets the chip, programs the station address, receive mode and
 * media, builds the transmit and receive descriptor rings, enables
 * interrupts and starts the receiver.  Returns 0 on success or an
 * errno from media selection / Rx buffer allocation.
 */
int
epic_init(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	uint32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  GPIO1 is raised around the PHY
	 * reset pulse; NVCTL is restored afterwards.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  The LAN registers each take
	 * one 16-bit little-endian word of the address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	if ((error = epic_mediachange(ifp)) != 0)
		goto out;

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		/* Buffer address points at this slot's fraglist. */
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating mbufs for
	 * any slots that don't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}
1053 | |
1054 | /* |
1055 | * Drain the receive queue. |
1056 | */ |
1057 | void |
1058 | epic_rxdrain(struct epic_softc *sc) |
1059 | { |
1060 | struct epic_descsoft *ds; |
1061 | int i; |
1062 | |
1063 | for (i = 0; i < EPIC_NRXDESC; i++) { |
1064 | ds = EPIC_DSRX(sc, i); |
1065 | if (ds->ds_mbuf != NULL) { |
1066 | bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); |
1067 | m_freem(ds->ds_mbuf); |
1068 | ds->ds_mbuf = NULL; |
1069 | } |
1070 | } |
1071 | } |
1072 | |
1073 | /* |
1074 | * Stop transmission on the interface. |
1075 | */ |
1076 | void |
1077 | epic_stop(struct ifnet *ifp, int disable) |
1078 | { |
1079 | struct epic_softc *sc = ifp->if_softc; |
1080 | bus_space_tag_t st = sc->sc_st; |
1081 | bus_space_handle_t sh = sc->sc_sh; |
1082 | struct epic_descsoft *ds; |
1083 | uint32_t reg; |
1084 | int i; |
1085 | |
1086 | /* |
1087 | * Stop the one second clock. |
1088 | */ |
1089 | callout_stop(&sc->sc_mii_callout); |
1090 | |
1091 | /* Down the MII. */ |
1092 | mii_down(&sc->sc_mii); |
1093 | |
1094 | /* Paranoia... */ |
1095 | epic_fixup_clock_source(sc); |
1096 | |
1097 | /* |
1098 | * Disable interrupts. |
1099 | */ |
1100 | reg = bus_space_read_4(st, sh, EPIC_GENCTL); |
1101 | bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA); |
1102 | bus_space_write_4(st, sh, EPIC_INTMASK, 0); |
1103 | |
1104 | /* |
1105 | * Stop the DMA engine and take the receiver off-line. |
1106 | */ |
1107 | bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA | |
1108 | COMMAND_STOP_TDMA | COMMAND_STOP_RX); |
1109 | |
1110 | /* |
1111 | * Release any queued transmit buffers. |
1112 | */ |
1113 | for (i = 0; i < EPIC_NTXDESC; i++) { |
1114 | ds = EPIC_DSTX(sc, i); |
1115 | if (ds->ds_mbuf != NULL) { |
1116 | bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); |
1117 | m_freem(ds->ds_mbuf); |
1118 | ds->ds_mbuf = NULL; |
1119 | } |
1120 | } |
1121 | |
1122 | /* |
1123 | * Mark the interface down and cancel the watchdog timer. |
1124 | */ |
1125 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1126 | ifp->if_timer = 0; |
1127 | |
1128 | if (disable) |
1129 | epic_rxdrain(sc); |
1130 | } |
1131 | |
1132 | /* |
1133 | * Read the EPIC Serial EEPROM. |
1134 | */ |
1135 | void |
1136 | epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, uint16_t *data) |
1137 | { |
1138 | bus_space_tag_t st = sc->sc_st; |
1139 | bus_space_handle_t sh = sc->sc_sh; |
1140 | uint16_t reg; |
1141 | int i, x; |
1142 | |
1143 | #define EEPROM_WAIT_READY(st, sh) \ |
1144 | while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \ |
1145 | /* nothing */ |
1146 | |
1147 | /* |
1148 | * Enable the EEPROM. |
1149 | */ |
1150 | bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE); |
1151 | EEPROM_WAIT_READY(st, sh); |
1152 | |
1153 | for (i = 0; i < wordcnt; i++) { |
1154 | /* Send CHIP SELECT for one clock tick. */ |
1155 | bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS); |
1156 | EEPROM_WAIT_READY(st, sh); |
1157 | |
1158 | /* Shift in the READ opcode. */ |
1159 | for (x = 3; x > 0; x--) { |
1160 | reg = EECTL_ENABLE|EECTL_EECS; |
1161 | if (EPIC_EEPROM_OPC_READ & (1 << (x - 1))) |
1162 | reg |= EECTL_EEDI; |
1163 | bus_space_write_4(st, sh, EPIC_EECTL, reg); |
1164 | EEPROM_WAIT_READY(st, sh); |
1165 | bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK); |
1166 | EEPROM_WAIT_READY(st, sh); |
1167 | bus_space_write_4(st, sh, EPIC_EECTL, reg); |
1168 | EEPROM_WAIT_READY(st, sh); |
1169 | } |
1170 | |
1171 | /* Shift in address. */ |
1172 | for (x = 6; x > 0; x--) { |
1173 | reg = EECTL_ENABLE|EECTL_EECS; |
1174 | if ((word + i) & (1 << (x - 1))) |
1175 | reg |= EECTL_EEDI; |
1176 | bus_space_write_4(st, sh, EPIC_EECTL, reg); |
1177 | EEPROM_WAIT_READY(st, sh); |
1178 | bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK); |
1179 | EEPROM_WAIT_READY(st, sh); |
1180 | bus_space_write_4(st, sh, EPIC_EECTL, reg); |
1181 | EEPROM_WAIT_READY(st, sh); |
1182 | } |
1183 | |
1184 | /* Shift out data. */ |
1185 | reg = EECTL_ENABLE|EECTL_EECS; |
1186 | data[i] = 0; |
1187 | for (x = 16; x > 0; x--) { |
1188 | bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK); |
1189 | EEPROM_WAIT_READY(st, sh); |
1190 | if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO) |
1191 | data[i] |= (1 << (x - 1)); |
1192 | bus_space_write_4(st, sh, EPIC_EECTL, reg); |
1193 | EEPROM_WAIT_READY(st, sh); |
1194 | } |
1195 | |
1196 | /* Clear CHIP SELECT. */ |
1197 | bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE); |
1198 | EEPROM_WAIT_READY(st, sh); |
1199 | } |
1200 | |
1201 | /* |
1202 | * Disable the EEPROM. |
1203 | */ |
1204 | bus_space_write_4(st, sh, EPIC_EECTL, 0); |
1205 | |
1206 | #undef EEPROM_WAIT_READY |
1207 | } |
1208 | |
1209 | /* |
1210 | * Add a receive buffer to the indicated descriptor. |
1211 | */ |
1212 | int |
1213 | epic_add_rxbuf(struct epic_softc *sc, int idx) |
1214 | { |
1215 | struct epic_descsoft *ds = EPIC_DSRX(sc, idx); |
1216 | struct mbuf *m; |
1217 | int error; |
1218 | |
1219 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1220 | if (m == NULL) |
1221 | return ENOBUFS; |
1222 | |
1223 | MCLGET(m, M_DONTWAIT); |
1224 | if ((m->m_flags & M_EXT) == 0) { |
1225 | m_freem(m); |
1226 | return ENOBUFS; |
1227 | } |
1228 | |
1229 | if (ds->ds_mbuf != NULL) |
1230 | bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); |
1231 | |
1232 | ds->ds_mbuf = m; |
1233 | |
1234 | error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, |
1235 | m->m_ext.ext_buf, m->m_ext.ext_size, NULL, |
1236 | BUS_DMA_READ|BUS_DMA_NOWAIT); |
1237 | if (error) { |
1238 | printf("%s: can't load rx DMA map %d, error = %d\n" , |
1239 | device_xname(sc->sc_dev), idx, error); |
1240 | panic("%s" , __func__); /* XXX */ |
1241 | } |
1242 | |
1243 | bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, |
1244 | ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
1245 | |
1246 | EPIC_INIT_RXDESC(sc, idx); |
1247 | |
1248 | return 0; |
1249 | } |
1250 | |
1251 | /* |
1252 | * Set the EPIC multicast hash table. |
1253 | * |
1254 | * NOTE: We rely on a recently-updated mii_media_active here! |
1255 | */ |
1256 | void |
1257 | epic_set_mchash(struct epic_softc *sc) |
1258 | { |
1259 | struct ethercom *ec = &sc->sc_ethercom; |
1260 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1261 | struct ether_multi *enm; |
1262 | struct ether_multistep step; |
1263 | uint32_t hash, mchash[4]; |
1264 | |
1265 | /* |
1266 | * Set up the multicast address filter by passing all multicast |
1267 | * addresses through a CRC generator, and then using the low-order |
1268 | * 6 bits as an index into the 64 bit multicast hash table (only |
1269 | * the lower 16 bits of each 32 bit multicast hash register are |
1270 | * valid). The high order bits select the register, while the |
1271 | * rest of the bits select the bit within the register. |
1272 | */ |
1273 | |
1274 | if (ifp->if_flags & IFF_PROMISC) |
1275 | goto allmulti; |
1276 | |
1277 | if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) { |
1278 | /* XXX hardware bug in 10Mbps mode. */ |
1279 | goto allmulti; |
1280 | } |
1281 | |
1282 | mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0; |
1283 | |
1284 | ETHER_FIRST_MULTI(step, ec, enm); |
1285 | while (enm != NULL) { |
1286 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
1287 | /* |
1288 | * We must listen to a range of multicast addresses. |
1289 | * For now, just accept all multicasts, rather than |
1290 | * trying to set only those filter bits needed to match |
1291 | * the range. (At this time, the only use of address |
1292 | * ranges is for IP multicast routing, for which the |
1293 | * range is big enough to require all bits set.) |
1294 | */ |
1295 | goto allmulti; |
1296 | } |
1297 | |
1298 | hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); |
1299 | hash >>= 26; |
1300 | |
1301 | /* Set the corresponding bit in the hash table. */ |
1302 | mchash[hash >> 4] |= 1 << (hash & 0xf); |
1303 | |
1304 | ETHER_NEXT_MULTI(step, enm); |
1305 | } |
1306 | |
1307 | ifp->if_flags &= ~IFF_ALLMULTI; |
1308 | goto sethash; |
1309 | |
1310 | allmulti: |
1311 | ifp->if_flags |= IFF_ALLMULTI; |
1312 | mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff; |
1313 | |
1314 | sethash: |
1315 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]); |
1316 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]); |
1317 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]); |
1318 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]); |
1319 | } |
1320 | |
1321 | /* |
1322 | * Wait for the MII to become ready. |
1323 | */ |
1324 | int |
1325 | epic_mii_wait(struct epic_softc *sc, uint32_t rw) |
1326 | { |
1327 | int i; |
1328 | |
1329 | for (i = 0; i < 50; i++) { |
1330 | if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw) |
1331 | == 0) |
1332 | break; |
1333 | delay(2); |
1334 | } |
1335 | if (i == 50) { |
1336 | printf("%s: MII timed out\n" , device_xname(sc->sc_dev)); |
1337 | return 1; |
1338 | } |
1339 | |
1340 | return 0; |
1341 | } |
1342 | |
1343 | /* |
1344 | * Read from the MII. |
1345 | */ |
1346 | int |
1347 | epic_mii_read(device_t self, int phy, int reg) |
1348 | { |
1349 | struct epic_softc *sc = device_private(self); |
1350 | |
1351 | if (epic_mii_wait(sc, MMCTL_WRITE)) |
1352 | return 0; |
1353 | |
1354 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL, |
1355 | MMCTL_ARG(phy, reg, MMCTL_READ)); |
1356 | |
1357 | if (epic_mii_wait(sc, MMCTL_READ)) |
1358 | return 0; |
1359 | |
1360 | return bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) & |
1361 | MMDATA_MASK; |
1362 | } |
1363 | |
1364 | /* |
1365 | * Write to the MII. |
1366 | */ |
1367 | void |
1368 | epic_mii_write(device_t self, int phy, int reg, int val) |
1369 | { |
1370 | struct epic_softc *sc = device_private(self); |
1371 | |
1372 | if (epic_mii_wait(sc, MMCTL_WRITE)) |
1373 | return; |
1374 | |
1375 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val); |
1376 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL, |
1377 | MMCTL_ARG(phy, reg, MMCTL_WRITE)); |
1378 | } |
1379 | |
1380 | /* |
1381 | * Callback from PHY when media changes. |
1382 | */ |
1383 | void |
1384 | epic_statchg(struct ifnet *ifp) |
1385 | { |
1386 | struct epic_softc *sc = ifp->if_softc; |
1387 | uint32_t txcon, miicfg; |
1388 | |
1389 | /* |
1390 | * Update loopback bits in TXCON to reflect duplex mode. |
1391 | */ |
1392 | txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON); |
1393 | if (sc->sc_mii.mii_media_active & IFM_FDX) |
1394 | txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2); |
1395 | else |
1396 | txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2); |
1397 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon); |
1398 | |
1399 | /* On some cards we need manualy set fullduplex led */ |
1400 | if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) { |
1401 | miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG); |
1402 | if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) |
1403 | miicfg |= MIICFG_ENABLE; |
1404 | else |
1405 | miicfg &= ~MIICFG_ENABLE; |
1406 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg); |
1407 | } |
1408 | |
1409 | /* |
1410 | * There is a multicast filter bug in 10Mbps mode. Kick the |
1411 | * multicast filter in case the speed changed. |
1412 | */ |
1413 | epic_set_mchash(sc); |
1414 | } |
1415 | |
1416 | /* |
1417 | * Callback from ifmedia to request new media setting. |
1418 | * |
1419 | * XXX Looks to me like some of this complexity should move into |
1420 | * XXX one or two custom PHY drivers. --dyoung |
1421 | */ |
1422 | int |
1423 | epic_mediachange(struct ifnet *ifp) |
1424 | { |
1425 | struct epic_softc *sc = ifp->if_softc; |
1426 | struct mii_data *mii = &sc->sc_mii; |
1427 | struct ifmedia *ifm = &mii->mii_media; |
1428 | int media = ifm->ifm_cur->ifm_media; |
1429 | uint32_t miicfg; |
1430 | struct mii_softc *miisc; |
1431 | int cfg, rc; |
1432 | |
1433 | if ((ifp->if_flags & IFF_UP) == 0) |
1434 | return 0; |
1435 | |
1436 | if (IFM_INST(media) != sc->sc_serinst) { |
1437 | /* If we're not selecting serial interface, select MII mode */ |
1438 | #ifdef EPICMEDIADEBUG |
1439 | printf("%s: parallel mode\n" , ifp->if_xname); |
1440 | #endif |
1441 | miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG); |
1442 | miicfg &= ~MIICFG_SERMODEENA; |
1443 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg); |
1444 | } |
1445 | |
1446 | if ((rc = mii_mediachg(mii)) == ENXIO) |
1447 | rc = 0; |
1448 | |
1449 | if (IFM_INST(media) == sc->sc_serinst) { |
1450 | /* select serial interface */ |
1451 | #ifdef EPICMEDIADEBUG |
1452 | printf("%s: serial mode\n" , ifp->if_xname); |
1453 | #endif |
1454 | miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG); |
1455 | miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE); |
1456 | bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg); |
1457 | |
1458 | /* There is no driver to fill this */ |
1459 | mii->mii_media_active = media; |
1460 | mii->mii_media_status = 0; |
1461 | |
1462 | epic_statchg(mii->mii_ifp); |
1463 | return 0; |
1464 | } |
1465 | |
1466 | /* Lookup selected PHY */ |
1467 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { |
1468 | if (IFM_INST(media) == miisc->mii_inst) |
1469 | break; |
1470 | } |
1471 | if (!miisc) { |
1472 | printf("%s: can't happen\n" , __func__); /* ??? panic */ |
1473 | return 0; |
1474 | } |
1475 | #ifdef EPICMEDIADEBUG |
1476 | printf("%s: using phy %s\n" , ifp->if_xname, |
1477 | device_xname(miisc->mii_dev)); |
1478 | #endif |
1479 | |
1480 | if (miisc->mii_flags & MIIF_HAVEFIBER) { |
1481 | /* XXX XXX assume it's a Level1 - should check */ |
1482 | |
1483 | /* We have to powerup fiber transceivers */ |
1484 | cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG); |
1485 | if (IFM_SUBTYPE(media) == IFM_100_FX) { |
1486 | #ifdef EPICMEDIADEBUG |
1487 | printf("%s: power up fiber\n" , ifp->if_xname); |
1488 | #endif |
1489 | cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0); |
1490 | } else { |
1491 | #ifdef EPICMEDIADEBUG |
1492 | printf("%s: power down fiber\n" , ifp->if_xname); |
1493 | #endif |
1494 | cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0); |
1495 | } |
1496 | PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg); |
1497 | } |
1498 | |
1499 | return rc; |
1500 | } |
1501 | |