/* $NetBSD: if_bce.c,v 1.42 2016/06/10 13:27:14 ozaki-r Exp $ */

/*
 * Copyright (c) 2003 Clifford Wright. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Broadcom BCM440x 10/100 ethernet (broadcom.com)
 * SiliconBackplane is technology from Sonics, Inc. (sonicsinc.com)
 *
 * Cliff Wright cliff@snipe444.org
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bce.c,v 1.42 2016/06/10 13:27:14 ozaki-r Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>
#include <sys/rndsource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bcereg.h>

/* transmit buffer max frags allowed */
#define BCE_NTXFRAGS 16

/* ring descriptor */
struct bce_dma_slot {
	uint32_t ctrl;
	uint32_t addr;
};
#define CTRL_BC_MASK 0x1fff	/* buffer byte count */
#define CTRL_EOT 0x10000000	/* end of descriptor table */
#define CTRL_IOC 0x20000000	/* interrupt on completion */
#define CTRL_EOF 0x40000000	/* end of frame */
#define CTRL_SOF 0x80000000	/* start of frame */
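
/*
 * Example: a single-fragment 64-byte frame placed in the last ring
 * slot would carry a control word of
 * CTRL_SOF | CTRL_EOF | CTRL_IOC | CTRL_EOT | 64
 * (see the descriptor setup in bce_start()).
 */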

/* Packet status is returned in a pre-packet header */
struct rx_pph {
	uint16_t len;
	uint16_t flags;
	uint16_t pad[12];
};
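
/*
 * The receive engine stores this 28-byte status header at the start
 * of each buffer; packet data begins at offset 30, hence the
 * m_data += 30 adjustments in bce_rxintr().
 */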

/* packet status flags bits */
#define RXF_NO	0x8	/* odd number of nibbles */
#define RXF_RXER	0x4	/* receive symbol error */
#define RXF_CRC	0x2	/* crc error */
#define RXF_OV	0x1	/* fifo overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC 128
#define BCE_NTXDESC 128

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from physical to virtual,
 * not the other way around.
 */
struct bce_chain_data {
	struct mbuf *bce_tx_chain[BCE_NTXDESC];
	struct mbuf *bce_rx_chain[BCE_NRXDESC];
	bus_dmamap_t bce_tx_map[BCE_NTXDESC];
	bus_dmamap_t bce_rx_map[BCE_NRXDESC];
};

#define BCE_TIMEOUT 100	/* # of 10us intervals to wait for mii read/write */

struct bce_softc {
	device_t bce_dev;
	bus_space_tag_t bce_btag;
	bus_space_handle_t bce_bhandle;
	bus_dma_tag_t bce_dmatag;
	struct ethercom ethercom;	/* interface info */
	void *bce_intrhand;
	struct pci_attach_args bce_pa;
	struct mii_data bce_mii;
	uint32_t bce_phy;		/* eeprom indicated phy */
	struct ifmedia bce_ifmedia;	/* media info *//* Check */
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct bce_dma_slot *bce_rx_ring;	/* receive ring */
	struct bce_dma_slot *bce_tx_ring;	/* transmit ring */
	struct bce_chain_data bce_cdata;	/* mbufs */
	bus_dmamap_t bce_ring_map;
	uint32_t bce_intmask;		/* current intr mask */
	uint32_t bce_rxin;		/* last rx descriptor seen */
	uint32_t bce_txin;		/* last tx descriptor seen */
	int bce_txsfree;		/* no. tx slots available */
	int bce_txsnext;		/* next available tx slot */
	callout_t bce_timeout;
	krndsource_t rnd_source;
};

/* for ring descriptors */
#define BCE_RXBUF_LEN (MCLBYTES - 4)
#define BCE_INIT_RXDESC(sc, x)						\
do {									\
	struct bce_dma_slot *__bced = &sc->bce_rx_ring[x];		\
									\
	*mtod(sc->bce_cdata.bce_rx_chain[x], uint32_t *) = 0;		\
	__bced->addr =							\
	    htole32(sc->bce_cdata.bce_rx_map[x]->dm_segs[0].ds_addr	\
	    + 0x40000000);						\
	if (x != (BCE_NRXDESC - 1))					\
		__bced->ctrl = htole32(BCE_RXBUF_LEN);			\
	else								\
		__bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);	\
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,		\
	    sizeof(struct bce_dma_slot) * x,				\
	    sizeof(struct bce_dma_slot),				\
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);			\
} while (/* CONSTCOND */ 0)
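
/*
 * The 0x40000000 added to descriptor and buffer addresses here and
 * in bce_start()/bce_init() is evidently the base of the core's 1GB
 * Sonics-to-PCI DMA window; host memory is visible to the chip only
 * through that window, which is also why bce_attach() restricts the
 * DMA tag to the low 1GB.
 */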

static int bce_probe(device_t, cfdata_t, void *);
static void bce_attach(device_t, device_t, void *);
static int bce_ioctl(struct ifnet *, u_long, void *);
static void bce_start(struct ifnet *);
static void bce_watchdog(struct ifnet *);
static int bce_intr(void *);
static void bce_rxintr(struct bce_softc *);
static void bce_txintr(struct bce_softc *);
static int bce_init(struct ifnet *);
static void bce_add_mac(struct bce_softc *, uint8_t *, unsigned long);
static int bce_add_rxbuf(struct bce_softc *, int);
static void bce_rxdrain(struct bce_softc *);
static void bce_stop(struct ifnet *, int);
static void bce_reset(struct bce_softc *);
static bool bce_resume(device_t, const pmf_qual_t *);
static void bce_set_filter(struct ifnet *);
static int bce_mii_read(device_t, int, int);
static void bce_mii_write(device_t, int, int, int);
static void bce_statchg(struct ifnet *);
static void bce_tick(void *);

CFATTACH_DECL_NEW(bce, sizeof(struct bce_softc),
    bce_probe, bce_attach, NULL, NULL);

static const struct bce_product {
	pci_vendor_id_t bp_vendor;
	pci_product_id_t bp_product;
	const char *bp_name;
} bce_products[] = {
	{
		PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM4401,
		"Broadcom BCM4401 10/100 Ethernet"
	},
	{
		PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM4401_B0,
		"Broadcom BCM4401-B0 10/100 Ethernet"
	},
	{
		0,
		0,
		NULL
	},
};

static const struct bce_product *
bce_lookup(const struct pci_attach_args *pa)
{
	const struct bce_product *bp;

	for (bp = bce_products; bp->bp_name != NULL; bp++) {
		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
			return (bp);
	}

	return (NULL);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against the driver's product list, and return nonzero if a match
 * is found.
 */
static int
bce_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;

	if (bce_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
bce_attach(device_t parent, device_t self, void *aux)
{
	struct bce_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct bce_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	uint32_t command;
	pcireg_t memtype, pmode;
	bus_addr_t memaddr;
	bus_size_t memsize;
	void *kva;
	bus_dma_segment_t seg;
	int error, i, pmreg, rseg;
	struct ifnet *ifp;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->bce_dev = self;

	bp = bce_lookup(pa);
	KASSERT(bp != NULL);

	sc->bce_pa = *pa;

	/* BCM440x can only address 30 bits (1GB) */
	if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30),
	    &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(self,
		    "WARNING: failed to restrict dma range,"
		    " falling back to parent bus dma range\n");
		sc->bce_dmatag = pa->pa_dmat;
	}

	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s\n", bp->bp_name);

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(self, "failed to enable memory mapping!\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize) == 0)
			break;
	default:
		aprint_error_dev(self, "unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(self,
			    "unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			aprint_normal_dev(self,
			    "waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);

	if (sc->bce_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same 4k
	 * space; however, both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; each ring needs only 1KB
	 * (128 descriptors of 8 bytes each), but the restriction
	 * above forces each ring into its own page.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to alloc space for ring descriptors, error = %d\n",
		    error);
		return;
	}
	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to map DMA buffers, error = %d\n", error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		aprint_error_dev(self,
		    "unable to create ring DMA map, error = %d\n", error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	ifp->if_stop = bce_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize our media structures and probe the MII. */

	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;

	sc->ethercom.ec_mii = &sc->bce_mii;
	ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_PHY) & 0x1f;
	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but isn't currently.
	 */
	bce_mii_write(sc->bce_dev, 1, 26,	/* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 26) & 0x7fff);	/* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(sc->bce_dev, 1, 27,	/* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 27) | (1 << 6));	/* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET0);
	sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET1);
	sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET2);
	sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET3);
	sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET4);
	sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET5);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->enaddr));
	ether_ifattach(ifp, sc->enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);
	callout_init(&sc->bce_timeout, 0);

	if (pmf_device_register(self, NULL, bce_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/* handle media and ethernet ioctl requests */
static int
bce_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/* change multicast list */
		error = 0;
	}

	/* Try to get more packets going. */
	bce_start(ifp);

	splx(s);
	return error;
}

/* Start packet transmission on the interface. */
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	bus_dmamap_t dmamap;
	int txstart;
	int txsfree;
	int newpkts = 0;
	int error;

	/*
	 * Do nothing if the interface is not running, or if output is
	 * already stalled waiting for free transmit descriptors
	 * (tx slots).
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;
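	/*
	 * One slot is always kept unused so that a full ring can be
	 * told apart from an empty one: with BCE_NTXDESC = 128,
	 * txin == txsnext (empty ring) yields txsfree = 127, while
	 * txsnext one slot behind txin (full ring) yields txsfree = 0.
	 */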

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {
		int seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. If the packet will not fit,
		 * it will be dropped. If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			aprint_error_dev(sc->bce_dev,
			    "Tx packet consumes too many DMA segments, "
			    "dropping...\n");
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			aprint_error_dev(sc->bce_dev,
			    "unable to load Tx buffer, error = %d\n",
			    error);
			break;
		}
		/* If not enough descriptors available, try again later */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}
		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		/* So take it off the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			uint32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr + 0x40000000);	/* MAGIC */
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}
		/* sync descriptors being used */
		if (sc->bce_txsnext > txstart) {
			bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			    PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart,
			    sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			    PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart,
			    sizeof(struct bce_dma_slot) *
			    (BCE_NTXDESC - txstart),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (sc->bce_txsnext != 0) {
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_ring_map, PAGE_SIZE,
				    sc->bce_txsnext *
				    sizeof(struct bce_dma_slot),
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			}
		}

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));
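		/*
		 * The byte offset just written points one slot past
		 * the last descriptor filled in; the chip then
		 * processes descriptors up to, but not including,
		 * that offset.
		 */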

		newpkts++;

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/* Watchdog timer handler. */
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->bce_dev, "device timeout\n");
	ifp->if_oerrors++;

	(void) bce_init(ifp);

	/* Try to get more packets going. */
	bce_start(ifp);
}

static int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	uint32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts */
		if (intstatus & ~(I_RI | I_XI)) {
			const char *msg = NULL;
			if (intstatus & I_XU)
				msg = "transmit fifo underflow";
			if (intstatus & I_RO) {
				msg = "receive fifo overflow";
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				msg = "receive descriptor underflow";
			if (intstatus & I_DE)
				msg = "descriptor protocol error";
			if (intstatus & I_PD)
				msg = "data error";
			if (intstatus & I_PC)
				msg = "descriptor error";
			if (intstatus & I_TO)
				msg = "general purpose timeout";
			if (msg != NULL)
				aprint_error_dev(sc->bce_dev, "%s\n", msg);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		rnd_add_uint32(&sc->rnd_source, intstatus);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}

/* Receive interrupt handler */
static void
bce_rxintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct rx_pph *pph;
	struct mbuf *m;
	int curr;
	int len;
	int i;

	/* get pointer to active receive slot */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
	    & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NRXDESC)
		curr = BCE_NRXDESC - 1;
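	/*
	 * RXSTATUS reports the chip's current descriptor as a byte
	 * offset into the ring, so the division by the 8-byte slot
	 * size above yields a slot index; an offset of 0x28, for
	 * example, means the chip is working on slot 5.
	 */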

	/*
	 * process packets up to, but not including, the packet
	 * currently being worked on
	 */
	for (i = sc->bce_rxin; i != curr;
	    i + 1 > BCE_NRXDESC - 1 ? i = 0 : i++) {
		/* complete any post dma memory ops on packet */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[i], 0,
		    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * If the packet had an error, simply recycle the buffer,
		 * resetting the len and flags.
		 */
		pph = mtod(sc->bce_cdata.bce_rx_chain[i], struct rx_pph *);
		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
			ifp->if_ierrors++;
			pph->len = 0;
			pph->flags = 0;
			continue;
		}
		/* receive the packet */
		len = pph->len;
		if (len == 0)
			continue;	/* no packet if empty */
		pph->len = 0;
		pph->flags = 0;
		/* bump past pre header to packet */
		sc->bce_cdata.bce_rx_chain[i]->m_data += 30;	/* MAGIC */

		/*
		 * The chip includes the CRC with every packet. Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it. This greatly reduces
		 * memory consumption when receiving lots
		 * of small packets.
		 *
		 * Otherwise, add a new buffer to the receive
		 * chain. If this fails, drop the packet and
		 * recycle the old buffer.
		 */
		if (len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
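			/*
			 * The 2-byte offset keeps the IP header
			 * 4-byte aligned: the 14-byte Ethernet
			 * header would otherwise leave it on a
			 * 2-byte boundary.
			 */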
			memcpy(mtod(m, void *),
			    mtod(sc->bce_cdata.bce_rx_chain[i], void *), len);
			sc->bce_cdata.bce_rx_chain[i]->m_data -= 30;	/* MAGIC */
		} else {
			m = sc->bce_cdata.bce_rx_chain[i];
			if (bce_add_rxbuf(sc, i) != 0) {
		dropit:
				ifp->if_ierrors++;
				/* continue to use old buffer */
				sc->bce_cdata.bce_rx_chain[i]->m_data -= 30;
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_cdata.bce_rx_map[i], 0,
				    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;

		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);

		/* re-check current in case it changed */
		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
		    sizeof(struct bce_dma_slot);
		if (curr >= BCE_NRXDESC)
			curr = BCE_NRXDESC - 1;
	}
	sc->bce_rxin = curr;
}

/* Transmit interrupt handler */
static void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int curr;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXSTATUS) &
	    RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr;
	    i + 1 > BCE_NTXDESC - 1 ? i = 0 : i++) {
		/* do any post dma memory ops on transmit data */
		if (sc->bce_cdata.bce_tx_chain[i] == NULL)
			continue;
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i], 0,
		    sc->bce_cdata.bce_tx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i]);
		m_freem(sc->bce_cdata.bce_tx_chain[i]);
		sc->bce_cdata.bce_tx_chain[i] = NULL;
		ifp->if_opackets++;
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}

/* initialize the interface */
static int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	uint32_t reg_win;
	int error;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp, 0);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CG);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);	/* MAGIC */

	/* setup packet filter */
	bce_set_filter(ifp);

	/* set max frame length, account for possible vlan tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + 32);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + 32);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    30 << 1 | 1);	/* MAGIC */
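	/*
	 * The 30 shifted into the receive-control offset field is
	 * evidently the room the chip leaves for the pre-packet status
	 * header at the start of each buffer (packet data then begins
	 * at that offset, matching the m_data += 30 in bce_rxintr());
	 * the low bit enables the receive engine.
	 */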
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);	/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] == NULL) {
			if ((error = bce_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->bce_dev,
				    "unable to allocate or map rx(%d) "
				    "mbuf, error = %d\n", i, error);
				bce_rxdrain(sc);
				return (error);
			}
		} else
			BCE_INIT_RXDESC(sc, i);
	}

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));
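	/*
	 * Writing the full table size as the initial descriptor
	 * pointer hands every receive slot to the chip; it fills
	 * buffers up to, but not including, that offset.
	 */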

	/* set media */
	if ((error = ether_mediachange(ifp)) != 0)
		return error;

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	callout_reset(&sc->bce_timeout, hz, bce_tick, sc);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/* add a mac address to packet filter */
static void
bce_add_mac(struct bce_softc *sc, uint8_t *mac, u_long idx)
{
	int i;
	uint32_t rval;

	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
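	/*
	 * The FILT_LOW/FILT_HI pair stages the address (low word:
	 * bytes 2-5; high word: bytes 0-1 plus what appears to be a
	 * "valid" bit at 0x10000); the FILT_CTL write, with the table
	 * index in the upper half and command 8, then commits the
	 * entry to the filter table.
	 */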
	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC */
			break;
		delay(10);
	}
	if (i == 100) {
		aprint_error_dev(sc->bce_dev,
		    "timed out writing pkt filter ctl\n");
	}
}

/* Add a receive buffer to the indicated descriptor. */
static int
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (sc->bce_cdata.bce_rx_chain[idx] != NULL)
		bus_dmamap_unload(sc->bce_dmatag,
		    sc->bce_cdata.bce_rx_map[idx]);

	sc->bce_cdata.bce_rx_chain[idx] = m;

	error = bus_dmamap_load(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error)
		return (error);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 0,
	    sc->bce_cdata.bce_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	BCE_INIT_RXDESC(sc, idx);

	return (0);
}

/* Drain the receive queue. */
static void
bce_rxdrain(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_rx_map[i]);
			m_freem(sc->bce_cdata.bce_rx_chain[i]);
			sc->bce_cdata.bce_rx_chain[i] = NULL;
		}
	}
}

/* Stop transmission on the interface */
static void
bce_stop(struct ifnet *ifp, int disable)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;
	uint32_t val;

	/* Stop the 1 second timer */
	callout_stop(&sc->bce_timeout);

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);

	/* Release any queued transmit buffers. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if (sc->bce_cdata.bce_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_tx_map[i]);
			m_freem(sc->bce_cdata.bce_tx_chain[i]);
			sc->bce_cdata.bce_tx_chain[i] = NULL;
		}
	}

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* drain receive queue */
	if (disable)
		bce_rxdrain(sc);
}

/* reset the chip */
static void
bce_reset(struct bce_softc *sc)
{
	uint32_t val;
	uint32_t sbval;
	int i;

	/* if SB core is up */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200) {
			aprint_error_dev(sc->bce_dev,
			    "timed out disabling ethernet mac\n");
		}

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100) {
				aprint_error_dev(sc->bce_dev,
				    "receive dma did not go idle after"
				    " error\n");
			}
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200) {
			aprint_error_dev(sc->bce_dev,
			    "timed out resetting ethernet mac\n");
		}
	} else {
		uint32_t reg_win;

		/* remap the pci registers to the Sonics config registers */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) |
		    SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) |
		    SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200) {
			aprint_error_dev(sc->bce_dev,
			    "while resetting core, reject did not set\n");
		}
		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200) {
			aprint_error_dev(sc->bce_dev,
			    "while resetting core, busy did not clear\n");
		}
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_MAGIC_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_MAGIC_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);	/* MAGIC */

	/* enable phy; handling differs for internal and external PHYs */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}

/* Set up the receive filter. */
static void
bce_set_filter(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL)
		    | ERC_PE);
	} else {
		ifp->if_flags &= ~IFF_ALLMULTI;

		/* turn off promiscuous */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) & ~ERC_PE);

		/* enable/disable broadcast */
		if (ifp->if_flags & IFF_BROADCAST)
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB);
		else
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) | ERC_DB);

		/* disable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    0);

		/* add our own address */
		bce_add_mac(sc, sc->enaddr, 0);

		/* for now accept all multicast */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL) |
		    ERC_AM);
		ifp->if_flags |= IFF_ALLMULTI;

		/* enable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL) | 1);
	}
}

static bool
bce_resume(device_t self, const pmf_qual_t *qual)
{
	struct bce_softc *sc = device_private(self);

	bce_reset(sc);

	return true;
}

/* Read a PHY register on the MII. */
static int
bce_mii_read(device_t self, int phy, int reg)
{
	struct bce_softc *sc = device_private(self);
	int i;
	uint32_t val;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS, BCE_MIINTR);

	/* Read the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */

	for (i = 0; i < BCE_TIMEOUT; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS);
		if (val & BCE_MIINTR)
			break;
		delay(10);
	}
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		aprint_error_dev(sc->bce_dev,
		    "PHY read timed out reading phy %d, reg %d, val = "
		    "0x%08x\n", phy, reg, val);
		return (0);
	}
	return (val & BCE_MICOMM_DATA);
}

/* Write a PHY register on the MII */
static void
bce_mii_write(device_t self, int phy, int reg, int val)
{
	struct bce_softc *sc = device_private(self);
	int i;
	uint32_t rval;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Write the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
	    BCE_MIPHY(phy) | BCE_MIREG(reg));

	/* wait for write to complete */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (rval & BCE_MIINTR)
			break;
		delay(10);
	}
	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		aprint_error_dev(sc->bce_dev,
		    "PHY timed out writing phy %d, reg %d, val = 0x%08x\n", phy,
		    reg, val);
	}
}

/* sync hardware duplex mode to software state */
static void
bce_statchg(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	uint32_t reg;

	/* if needed, change register to match duplex mode */
	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg | EXC_FD);
	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg & ~EXC_FD);

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but isn't currently.
	 */
	bce_mii_write(sc->bce_dev, 1, 26,	/* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 26) & 0x7fff);	/* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(sc->bce_dev, 1, 27,	/* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 27) | (1 << 6));	/* MAGIC */
}

/* One second timer, checks link status */
static void
bce_tick(void *v)
{
	struct bce_softc *sc = v;

	/* Tick the MII. */
	mii_tick(&sc->bce_mii);

	callout_reset(&sc->bce_timeout, hz, bce_tick, sc);
}