1 | /* $NetBSD: if_ale.c,v 1.20 2016/02/09 08:32:11 ozaki-r Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> |
5 | * All rights reserved. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice unmodified, this list of conditions, and the following |
12 | * disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 | * SUCH DAMAGE. |
28 | * |
29 | * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $ |
30 | */ |
31 | |
32 | /* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */ |
33 | |
34 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: if_ale.c,v 1.20 2016/02/09 08:32:11 ozaki-r Exp $");
36 | |
37 | #include "vlan.h" |
38 | |
39 | #include <sys/param.h> |
40 | #include <sys/proc.h> |
41 | #include <sys/endian.h> |
42 | #include <sys/systm.h> |
43 | #include <sys/types.h> |
44 | #include <sys/sockio.h> |
45 | #include <sys/mbuf.h> |
46 | #include <sys/queue.h> |
47 | #include <sys/kernel.h> |
48 | #include <sys/device.h> |
49 | #include <sys/callout.h> |
50 | #include <sys/socket.h> |
51 | |
52 | #include <sys/bus.h> |
53 | |
54 | #include <net/if.h> |
55 | #include <net/if_dl.h> |
56 | #include <net/if_llc.h> |
57 | #include <net/if_media.h> |
58 | #include <net/if_ether.h> |
59 | |
60 | #ifdef INET |
61 | #include <netinet/in.h> |
62 | #include <netinet/in_systm.h> |
63 | #include <netinet/in_var.h> |
64 | #include <netinet/ip.h> |
65 | #endif |
66 | |
67 | #include <net/if_types.h> |
68 | #include <net/if_vlanvar.h> |
69 | |
70 | #include <net/bpf.h> |
71 | |
72 | #include <dev/mii/mii.h> |
73 | #include <dev/mii/miivar.h> |
74 | |
75 | #include <dev/pci/pcireg.h> |
76 | #include <dev/pci/pcivar.h> |
77 | #include <dev/pci/pcidevs.h> |
78 | |
79 | #include <dev/pci/if_alereg.h> |
80 | |
81 | static int ale_match(device_t, cfdata_t, void *); |
82 | static void ale_attach(device_t, device_t, void *); |
83 | static int ale_detach(device_t, int); |
84 | |
85 | static int ale_miibus_readreg(device_t, int, int); |
86 | static void ale_miibus_writereg(device_t, int, int, int); |
87 | static void ale_miibus_statchg(struct ifnet *); |
88 | |
89 | static int ale_init(struct ifnet *); |
90 | static void ale_start(struct ifnet *); |
91 | static int ale_ioctl(struct ifnet *, u_long, void *); |
92 | static void ale_watchdog(struct ifnet *); |
93 | static int ale_mediachange(struct ifnet *); |
94 | static void ale_mediastatus(struct ifnet *, struct ifmediareq *); |
95 | |
96 | static int ale_intr(void *); |
static int	ale_rxeof(struct ale_softc *);
98 | static void ale_rx_update_page(struct ale_softc *, struct ale_rx_page **, |
99 | uint32_t, uint32_t *); |
100 | static void ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t); |
101 | static void ale_txeof(struct ale_softc *); |
102 | |
103 | static int ale_dma_alloc(struct ale_softc *); |
104 | static void ale_dma_free(struct ale_softc *); |
105 | static int ale_encap(struct ale_softc *, struct mbuf **); |
106 | static void ale_init_rx_pages(struct ale_softc *); |
107 | static void ale_init_tx_ring(struct ale_softc *); |
108 | |
109 | static void ale_stop(struct ifnet *, int); |
110 | static void ale_tick(void *); |
111 | static void ale_get_macaddr(struct ale_softc *); |
112 | static void ale_mac_config(struct ale_softc *); |
113 | static void ale_phy_reset(struct ale_softc *); |
114 | static void ale_reset(struct ale_softc *); |
115 | static void ale_rxfilter(struct ale_softc *); |
116 | static void ale_rxvlan(struct ale_softc *); |
117 | static void ale_stats_clear(struct ale_softc *); |
118 | static void ale_stats_update(struct ale_softc *); |
119 | static void ale_stop_mac(struct ale_softc *); |
120 | |
121 | CFATTACH_DECL_NEW(ale, sizeof(struct ale_softc), |
122 | ale_match, ale_attach, ale_detach, NULL); |
123 | |
124 | int aledebug = 0; |
125 | #define DPRINTF(x) do { if (aledebug) printf x; } while (0) |
126 | |
127 | #define ETHER_ALIGN 2 |
128 | #define ALE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) |
129 | |
130 | static int |
131 | ale_miibus_readreg(device_t dev, int phy, int reg) |
132 | { |
133 | struct ale_softc *sc = device_private(dev); |
134 | uint32_t v; |
135 | int i; |
136 | |
137 | if (phy != sc->ale_phyaddr) |
138 | return 0; |
139 | |
140 | if (sc->ale_flags & ALE_FLAG_FASTETHER) { |
141 | switch (reg) { |
142 | case MII_100T2CR: |
143 | case MII_100T2SR: |
144 | case MII_EXTSR: |
145 | return 0; |
146 | default: |
147 | break; |
148 | } |
149 | } |
150 | |
151 | CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | |
152 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); |
153 | for (i = ALE_PHY_TIMEOUT; i > 0; i--) { |
154 | DELAY(5); |
155 | v = CSR_READ_4(sc, ALE_MDIO); |
156 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) |
157 | break; |
158 | } |
159 | |
160 | if (i == 0) { |
		printf("%s: phy read timeout: phy %d, reg %d\n",
162 | device_xname(sc->sc_dev), phy, reg); |
163 | return 0; |
164 | } |
165 | |
166 | return (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT; |
167 | } |
168 | |
169 | static void |
170 | ale_miibus_writereg(device_t dev, int phy, int reg, int val) |
171 | { |
172 | struct ale_softc *sc = device_private(dev); |
173 | uint32_t v; |
174 | int i; |
175 | |
176 | if (phy != sc->ale_phyaddr) |
177 | return; |
178 | |
179 | if (sc->ale_flags & ALE_FLAG_FASTETHER) { |
180 | switch (reg) { |
181 | case MII_100T2CR: |
182 | case MII_100T2SR: |
183 | case MII_EXTSR: |
184 | return; |
185 | default: |
186 | break; |
187 | } |
188 | } |
189 | |
190 | CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | |
191 | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | |
192 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); |
193 | for (i = ALE_PHY_TIMEOUT; i > 0; i--) { |
194 | DELAY(5); |
195 | v = CSR_READ_4(sc, ALE_MDIO); |
196 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) |
197 | break; |
198 | } |
199 | |
200 | if (i == 0) |
		printf("%s: phy write timeout: phy %d, reg %d\n",
202 | device_xname(sc->sc_dev), phy, reg); |
203 | } |
204 | |
205 | static void |
206 | ale_miibus_statchg(struct ifnet *ifp) |
207 | { |
208 | struct ale_softc *sc = ifp->if_softc; |
209 | struct mii_data *mii = &sc->sc_miibus; |
210 | uint32_t reg; |
211 | |
212 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
213 | return; |
214 | |
215 | sc->ale_flags &= ~ALE_FLAG_LINK; |
216 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
217 | (IFM_ACTIVE | IFM_AVALID)) { |
218 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
219 | case IFM_10_T: |
220 | case IFM_100_TX: |
221 | sc->ale_flags |= ALE_FLAG_LINK; |
222 | break; |
223 | |
224 | case IFM_1000_T: |
225 | if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0) |
226 | sc->ale_flags |= ALE_FLAG_LINK; |
227 | break; |
228 | |
229 | default: |
230 | break; |
231 | } |
232 | } |
233 | |
234 | /* Stop Rx/Tx MACs. */ |
235 | ale_stop_mac(sc); |
236 | |
237 | /* Program MACs with resolved speed/duplex/flow-control. */ |
238 | if ((sc->ale_flags & ALE_FLAG_LINK) != 0) { |
239 | ale_mac_config(sc); |
240 | /* Reenable Tx/Rx MACs. */ |
241 | reg = CSR_READ_4(sc, ALE_MAC_CFG); |
242 | reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; |
243 | CSR_WRITE_4(sc, ALE_MAC_CFG, reg); |
244 | } |
245 | } |
246 | |
static void
248 | ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
249 | { |
250 | struct ale_softc *sc = ifp->if_softc; |
251 | struct mii_data *mii = &sc->sc_miibus; |
252 | |
253 | mii_pollstat(mii); |
254 | ifmr->ifm_status = mii->mii_media_status; |
255 | ifmr->ifm_active = mii->mii_media_active; |
256 | } |
257 | |
static int
259 | ale_mediachange(struct ifnet *ifp) |
260 | { |
261 | struct ale_softc *sc = ifp->if_softc; |
262 | struct mii_data *mii = &sc->sc_miibus; |
263 | int error; |
264 | |
265 | if (mii->mii_instance != 0) { |
266 | struct mii_softc *miisc; |
267 | |
268 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
269 | mii_phy_reset(miisc); |
270 | } |
271 | error = mii_mediachg(mii); |
272 | |
273 | return error; |
274 | } |
275 | |
static int
277 | ale_match(device_t dev, cfdata_t match, void *aux) |
278 | { |
279 | struct pci_attach_args *pa = aux; |
280 | |
281 | return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && |
282 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_L1E); |
283 | } |
284 | |
static void
286 | ale_get_macaddr(struct ale_softc *sc) |
287 | { |
288 | uint32_t ea[2], reg; |
289 | int i, vpdc; |
290 | |
291 | reg = CSR_READ_4(sc, ALE_SPI_CTRL); |
292 | if ((reg & SPI_VPD_ENB) != 0) { |
293 | reg &= ~SPI_VPD_ENB; |
294 | CSR_WRITE_4(sc, ALE_SPI_CTRL, reg); |
295 | } |
296 | |
297 | if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, PCI_CAP_VPD, |
298 | &vpdc, NULL)) { |
299 | /* |
300 | * PCI VPD capability found, let TWSI reload EEPROM. |
301 | * This will set ethernet address of controller. |
302 | */ |
303 | CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) | |
304 | TWSI_CTRL_SW_LD_START); |
305 | for (i = 100; i > 0; i--) { |
306 | DELAY(1000); |
307 | reg = CSR_READ_4(sc, ALE_TWSI_CTRL); |
308 | if ((reg & TWSI_CTRL_SW_LD_START) == 0) |
309 | break; |
310 | } |
311 | if (i == 0) |
			printf("%s: reloading EEPROM timeout!\n",
313 | device_xname(sc->sc_dev)); |
314 | } else { |
315 | if (aledebug) |
			printf("%s: PCI VPD capability not found!\n",
317 | device_xname(sc->sc_dev)); |
318 | } |
319 | |
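	/*
	 * The station address is split across two registers: PAR1
	 * holds the two most significant bytes and PAR0 the
	 * remaining four.
	 */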
320 | ea[0] = CSR_READ_4(sc, ALE_PAR0); |
321 | ea[1] = CSR_READ_4(sc, ALE_PAR1); |
322 | sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF; |
323 | sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF; |
324 | sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF; |
325 | sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF; |
326 | sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF; |
327 | sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF; |
328 | } |
329 | |
static void
331 | ale_phy_reset(struct ale_softc *sc) |
332 | { |
333 | /* Reset magic from Linux. */ |
334 | CSR_WRITE_2(sc, ALE_GPHY_CTRL, |
335 | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET | |
336 | GPHY_CTRL_PHY_PLL_ON); |
337 | DELAY(1000); |
338 | CSR_WRITE_2(sc, ALE_GPHY_CTRL, |
339 | GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | |
340 | GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON); |
341 | DELAY(1000); |
342 | |
343 | #define ATPHY_DBG_ADDR 0x1D |
344 | #define ATPHY_DBG_DATA 0x1E |
345 | |
346 | /* Enable hibernation mode. */ |
347 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
348 | ATPHY_DBG_ADDR, 0x0B); |
349 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
350 | ATPHY_DBG_DATA, 0xBC00); |
351 | /* Set Class A/B for all modes. */ |
352 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
353 | ATPHY_DBG_ADDR, 0x00); |
354 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
355 | ATPHY_DBG_DATA, 0x02EF); |
356 | /* Enable 10BT power saving. */ |
357 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
358 | ATPHY_DBG_ADDR, 0x12); |
359 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
360 | ATPHY_DBG_DATA, 0x4C04); |
361 | /* Adjust 1000T power. */ |
362 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
363 | ATPHY_DBG_ADDR, 0x04); |
364 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
365 | ATPHY_DBG_DATA, 0x8BBB); |
366 | /* 10BT center tap voltage. */ |
367 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
368 | ATPHY_DBG_ADDR, 0x05); |
369 | ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr, |
370 | ATPHY_DBG_DATA, 0x2C46); |
371 | |
372 | #undef ATPHY_DBG_ADDR |
373 | #undef ATPHY_DBG_DATA |
374 | DELAY(1000); |
375 | } |
376 | |
static void
378 | ale_attach(device_t parent, device_t self, void *aux) |
379 | { |
380 | struct ale_softc *sc = device_private(self); |
381 | struct pci_attach_args *pa = aux; |
382 | pci_chipset_tag_t pc = pa->pa_pc; |
383 | pci_intr_handle_t ih; |
384 | const char *intrstr; |
385 | struct ifnet *ifp; |
386 | pcireg_t memtype; |
387 | int mii_flags, error = 0; |
388 | uint32_t rxf_len, txf_len; |
389 | const char *chipname; |
390 | char intrbuf[PCI_INTRSTR_LEN]; |
391 | |
	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1E Ethernet\n");
394 | |
395 | sc->sc_dev = self; |
396 | sc->sc_dmat = pa->pa_dmat; |
397 | sc->sc_pct = pa->pa_pc; |
398 | sc->sc_pcitag = pa->pa_tag; |
399 | |
400 | /* |
401 | * Allocate IO memory |
402 | */ |
403 | memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, ALE_PCIR_BAR); |
404 | switch (memtype) { |
405 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
406 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: |
407 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
408 | break; |
409 | default: |
		aprint_error_dev(self, "invalid base address register\n");
411 | break; |
412 | } |
413 | |
414 | if (pci_mapreg_map(pa, ALE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, |
415 | &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) { |
		aprint_error_dev(self, "could not map mem space\n");
417 | return; |
418 | } |
419 | |
420 | if (pci_intr_map(pa, &ih) != 0) { |
		aprint_error_dev(self, "could not map interrupt\n");
422 | goto fail; |
423 | } |
424 | |
425 | /* |
426 | * Allocate IRQ |
427 | */ |
428 | intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf)); |
429 | sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, ale_intr, sc); |
430 | if (sc->sc_irq_handle == NULL) { |
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
435 | goto fail; |
436 | } |
437 | |
438 | /* Set PHY address. */ |
439 | sc->ale_phyaddr = ALE_PHY_ADDR; |
440 | |
441 | /* Reset PHY. */ |
442 | ale_phy_reset(sc); |
443 | |
444 | /* Reset the ethernet controller. */ |
445 | ale_reset(sc); |
446 | |
447 | /* Get PCI and chip id/revision. */ |
448 | sc->ale_rev = PCI_REVISION(pa->pa_class); |
449 | if (sc->ale_rev >= 0xF0) { |
450 | /* L2E Rev. B. AR8114 */ |
451 | sc->ale_flags |= ALE_FLAG_FASTETHER; |
		chipname = "AR8114 (L2E RevB)";
453 | } else { |
454 | if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) { |
455 | /* L1E AR8121 */ |
456 | sc->ale_flags |= ALE_FLAG_JUMBO; |
			chipname = "AR8121 (L1E)";
458 | } else { |
459 | /* L2E Rev. A. AR8113 */ |
460 | sc->ale_flags |= ALE_FLAG_FASTETHER; |
			chipname = "AR8113 (L2E RevA)";
462 | } |
463 | } |
	aprint_normal_dev(self, "%s, %s\n", chipname, intrstr);
465 | |
466 | /* |
467 | * All known controllers seems to require 4 bytes alignment |
468 | * of Tx buffers to make Tx checksum offload with custom |
469 | * checksum generation method work. |
470 | */ |
471 | sc->ale_flags |= ALE_FLAG_TXCSUM_BUG; |
472 | |
473 | /* |
474 | * All known controllers seems to have issues on Rx checksum |
475 | * offload for fragmented IP datagrams. |
476 | */ |
477 | sc->ale_flags |= ALE_FLAG_RXCSUM_BUG; |
478 | |
479 | /* |
480 | * Don't use Tx CMB. It is known to cause RRS update failure |
481 | * under certain circumstances. Typical phenomenon of the |
482 | * issue would be unexpected sequence number encountered in |
483 | * Rx handler. |
484 | */ |
485 | sc->ale_flags |= ALE_FLAG_TXCMB_BUG; |
486 | sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >> |
487 | MASTER_CHIP_REV_SHIFT; |
	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->ale_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->ale_chip_rev);
490 | |
491 | /* |
492 | * Uninitialized hardware returns an invalid chip id/revision |
493 | * as well as 0xFFFFFFFF for Tx/Rx fifo length. |
494 | */ |
495 | txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN); |
496 | rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN); |
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		aprint_error_dev(self, "chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n",
		    sc->ale_chip_rev, txf_len, rxf_len);
502 | goto fail; |
503 | } |
504 | |
505 | if (aledebug) { |
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
507 | txf_len, rxf_len); |
508 | } |
509 | |
510 | /* Set max allowable DMA size. */ |
511 | sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128; |
512 | sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128; |
513 | |
514 | callout_init(&sc->sc_tick_ch, 0); |
515 | callout_setfunc(&sc->sc_tick_ch, ale_tick, sc); |
516 | |
517 | error = ale_dma_alloc(sc); |
518 | if (error) |
519 | goto fail; |
520 | |
521 | /* Load station address. */ |
522 | ale_get_macaddr(sc); |
523 | |
	aprint_normal_dev(self, "Ethernet address %s\n",
525 | ether_sprintf(sc->ale_eaddr)); |
526 | |
527 | ifp = &sc->sc_ec.ec_if; |
528 | ifp->if_softc = sc; |
529 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
530 | ifp->if_init = ale_init; |
531 | ifp->if_ioctl = ale_ioctl; |
532 | ifp->if_start = ale_start; |
533 | ifp->if_stop = ale_stop; |
534 | ifp->if_watchdog = ale_watchdog; |
535 | IFQ_SET_MAXLEN(&ifp->if_snd, ALE_TX_RING_CNT - 1); |
536 | IFQ_SET_READY(&ifp->if_snd); |
537 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
538 | |
539 | sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; |
540 | |
541 | #ifdef ALE_CHECKSUM |
542 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
543 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
544 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
545 | #endif |
546 | |
547 | #if NVLAN > 0 |
548 | sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; |
549 | #endif |
550 | |
551 | /* Set up MII bus. */ |
552 | sc->sc_miibus.mii_ifp = ifp; |
553 | sc->sc_miibus.mii_readreg = ale_miibus_readreg; |
554 | sc->sc_miibus.mii_writereg = ale_miibus_writereg; |
555 | sc->sc_miibus.mii_statchg = ale_miibus_statchg; |
556 | |
557 | sc->sc_ec.ec_mii = &sc->sc_miibus; |
558 | ifmedia_init(&sc->sc_miibus.mii_media, 0, ale_mediachange, |
559 | ale_mediastatus); |
560 | mii_flags = 0; |
561 | if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) |
562 | mii_flags |= MIIF_DOPAUSE; |
563 | mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, |
564 | MII_OFFSET_ANY, mii_flags); |
565 | |
566 | if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { |
		aprint_error_dev(self, "no PHY found!\n");
568 | ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, |
569 | 0, NULL); |
570 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); |
571 | } else |
572 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); |
573 | |
574 | if_attach(ifp); |
575 | ether_ifattach(ifp, sc->ale_eaddr); |
576 | |
577 | if (pmf_device_register(self, NULL, NULL)) |
578 | pmf_class_network_register(self, ifp); |
579 | else |
		aprint_error_dev(self, "couldn't establish power handler\n");
581 | |
582 | return; |
583 | fail: |
584 | ale_dma_free(sc); |
585 | if (sc->sc_irq_handle != NULL) { |
586 | pci_intr_disestablish(pc, sc->sc_irq_handle); |
587 | sc->sc_irq_handle = NULL; |
588 | } |
589 | if (sc->sc_mem_size) { |
590 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); |
591 | sc->sc_mem_size = 0; |
592 | } |
593 | } |
594 | |
595 | static int |
596 | ale_detach(device_t self, int flags) |
597 | { |
598 | struct ale_softc *sc = device_private(self); |
599 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
600 | int s; |
601 | |
602 | pmf_device_deregister(self); |
603 | s = splnet(); |
604 | ale_stop(ifp, 0); |
605 | splx(s); |
606 | |
607 | mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); |
608 | |
609 | /* Delete all remaining media. */ |
610 | ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); |
611 | |
612 | ether_ifdetach(ifp); |
613 | if_detach(ifp); |
614 | ale_dma_free(sc); |
615 | |
616 | if (sc->sc_irq_handle != NULL) { |
617 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); |
618 | sc->sc_irq_handle = NULL; |
619 | } |
620 | if (sc->sc_mem_size) { |
621 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); |
622 | sc->sc_mem_size = 0; |
623 | } |
624 | |
625 | return 0; |
626 | } |
627 | |
628 | |
629 | static int |
630 | ale_dma_alloc(struct ale_softc *sc) |
631 | { |
632 | struct ale_txdesc *txd; |
633 | int nsegs, error, guard_size, i; |
634 | |
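	/*
	 * Oversize each Rx page by one maximum-sized frame (the
	 * guard area) so that a frame starting near the end of a
	 * page cannot overrun the buffer.
	 */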
635 | if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) |
636 | guard_size = ALE_JUMBO_FRAMELEN; |
637 | else |
638 | guard_size = ALE_MAX_FRAMELEN; |
639 | sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ, |
640 | ALE_RX_PAGE_ALIGN); |
641 | |
642 | /* |
643 | * Create DMA stuffs for TX ring |
644 | */ |
645 | error = bus_dmamap_create(sc->sc_dmat, ALE_TX_RING_SZ, 1, |
646 | ALE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_ring_map); |
647 | if (error) { |
648 | sc->ale_cdata.ale_tx_ring_map = NULL; |
649 | return ENOBUFS; |
650 | } |
651 | |
652 | /* Allocate DMA'able memory for TX ring */ |
653 | error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_RING_SZ, |
654 | 0, 0, &sc->ale_cdata.ale_tx_ring_seg, 1, |
655 | &nsegs, BUS_DMA_WAITOK); |
656 | if (error) { |
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
659 | return error; |
660 | } |
661 | |
662 | error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_ring_seg, |
663 | nsegs, ALE_TX_RING_SZ, (void **)&sc->ale_cdata.ale_tx_ring, |
664 | BUS_DMA_NOWAIT); |
665 | if (error) |
666 | return ENOBUFS; |
667 | |
668 | memset(sc->ale_cdata.ale_tx_ring, 0, ALE_TX_RING_SZ); |
669 | |
670 | /* Load the DMA map for Tx ring. */ |
671 | error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, |
672 | sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); |
673 | if (error) { |
		printf("%s: could not load DMA'able memory for Tx ring.\n",
675 | device_xname(sc->sc_dev)); |
676 | bus_dmamem_free(sc->sc_dmat, |
677 | &sc->ale_cdata.ale_tx_ring_seg, 1); |
678 | return error; |
679 | } |
680 | sc->ale_cdata.ale_tx_ring_paddr = |
681 | sc->ale_cdata.ale_tx_ring_map->dm_segs[0].ds_addr; |
682 | |
683 | for (i = 0; i < ALE_RX_PAGES; i++) { |
684 | /* |
685 | * Create DMA stuffs for RX pages |
686 | */ |
687 | error = bus_dmamap_create(sc->sc_dmat, sc->ale_pagesize, 1, |
688 | sc->ale_pagesize, 0, BUS_DMA_NOWAIT, |
689 | &sc->ale_cdata.ale_rx_page[i].page_map); |
690 | if (error) { |
691 | sc->ale_cdata.ale_rx_page[i].page_map = NULL; |
692 | return ENOBUFS; |
693 | } |
694 | |
695 | /* Allocate DMA'able memory for RX pages */ |
696 | error = bus_dmamem_alloc(sc->sc_dmat, sc->ale_pagesize, |
697 | ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].page_seg, |
698 | 1, &nsegs, BUS_DMA_WAITOK); |
699 | if (error) { |
			printf("%s: could not allocate DMA'able memory for "
			    "Rx ring.\n", device_xname(sc->sc_dev));
702 | return error; |
703 | } |
704 | error = bus_dmamem_map(sc->sc_dmat, |
705 | &sc->ale_cdata.ale_rx_page[i].page_seg, nsegs, |
706 | sc->ale_pagesize, |
707 | (void **)&sc->ale_cdata.ale_rx_page[i].page_addr, |
708 | BUS_DMA_NOWAIT); |
709 | if (error) |
710 | return ENOBUFS; |
711 | |
712 | memset(sc->ale_cdata.ale_rx_page[i].page_addr, 0, |
713 | sc->ale_pagesize); |
714 | |
715 | /* Load the DMA map for Rx pages. */ |
716 | error = bus_dmamap_load(sc->sc_dmat, |
717 | sc->ale_cdata.ale_rx_page[i].page_map, |
718 | sc->ale_cdata.ale_rx_page[i].page_addr, |
719 | sc->ale_pagesize, NULL, BUS_DMA_WAITOK); |
720 | if (error) { |
			printf("%s: could not load DMA'able memory for "
			    "Rx pages.\n", device_xname(sc->sc_dev));
723 | bus_dmamem_free(sc->sc_dmat, |
724 | &sc->ale_cdata.ale_rx_page[i].page_seg, 1); |
725 | return error; |
726 | } |
727 | sc->ale_cdata.ale_rx_page[i].page_paddr = |
728 | sc->ale_cdata.ale_rx_page[i].page_map->dm_segs[0].ds_addr; |
729 | } |
730 | |
731 | /* |
732 | * Create DMA stuffs for Tx CMB. |
733 | */ |
734 | error = bus_dmamap_create(sc->sc_dmat, ALE_TX_CMB_SZ, 1, |
735 | ALE_TX_CMB_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_cmb_map); |
736 | if (error) { |
737 | sc->ale_cdata.ale_tx_cmb_map = NULL; |
738 | return ENOBUFS; |
739 | } |
740 | |
741 | /* Allocate DMA'able memory for Tx CMB. */ |
742 | error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_CMB_SZ, ETHER_ALIGN, 0, |
743 | &sc->ale_cdata.ale_tx_cmb_seg, 1, &nsegs, BUS_DMA_WAITOK); |
744 | |
745 | if (error) { |
		printf("%s: could not allocate DMA'able memory for Tx CMB.\n",
747 | device_xname(sc->sc_dev)); |
748 | return error; |
749 | } |
750 | |
751 | error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_cmb_seg, |
752 | nsegs, ALE_TX_CMB_SZ, (void **)&sc->ale_cdata.ale_tx_cmb, |
753 | BUS_DMA_NOWAIT); |
754 | if (error) |
755 | return ENOBUFS; |
756 | |
757 | memset(sc->ale_cdata.ale_tx_cmb, 0, ALE_TX_CMB_SZ); |
758 | |
759 | /* Load the DMA map for Tx CMB. */ |
760 | error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, |
761 | sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ, NULL, BUS_DMA_WAITOK); |
762 | if (error) { |
		printf("%s: could not load DMA'able memory for Tx CMB.\n",
764 | device_xname(sc->sc_dev)); |
765 | bus_dmamem_free(sc->sc_dmat, |
766 | &sc->ale_cdata.ale_tx_cmb_seg, 1); |
767 | return error; |
768 | } |
769 | |
770 | sc->ale_cdata.ale_tx_cmb_paddr = |
771 | sc->ale_cdata.ale_tx_cmb_map->dm_segs[0].ds_addr; |
772 | |
773 | for (i = 0; i < ALE_RX_PAGES; i++) { |
774 | /* |
775 | * Create DMA stuffs for Rx CMB. |
776 | */ |
777 | error = bus_dmamap_create(sc->sc_dmat, ALE_RX_CMB_SZ, 1, |
778 | ALE_RX_CMB_SZ, 0, BUS_DMA_NOWAIT, |
779 | &sc->ale_cdata.ale_rx_page[i].cmb_map); |
780 | if (error) { |
781 | sc->ale_cdata.ale_rx_page[i].cmb_map = NULL; |
782 | return ENOBUFS; |
783 | } |
784 | |
785 | /* Allocate DMA'able memory for Rx CMB */ |
786 | error = bus_dmamem_alloc(sc->sc_dmat, ALE_RX_CMB_SZ, |
787 | ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1, |
788 | &nsegs, BUS_DMA_WAITOK); |
789 | if (error) { |
			printf("%s: could not allocate DMA'able memory for "
			    "Rx CMB\n", device_xname(sc->sc_dev));
792 | return error; |
793 | } |
794 | error = bus_dmamem_map(sc->sc_dmat, |
795 | &sc->ale_cdata.ale_rx_page[i].cmb_seg, nsegs, |
796 | ALE_RX_CMB_SZ, |
797 | (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr, |
798 | BUS_DMA_NOWAIT); |
799 | if (error) |
800 | return ENOBUFS; |
801 | |
802 | memset(sc->ale_cdata.ale_rx_page[i].cmb_addr, 0, ALE_RX_CMB_SZ); |
803 | |
804 | /* Load the DMA map for Rx CMB */ |
805 | error = bus_dmamap_load(sc->sc_dmat, |
806 | sc->ale_cdata.ale_rx_page[i].cmb_map, |
807 | sc->ale_cdata.ale_rx_page[i].cmb_addr, |
808 | ALE_RX_CMB_SZ, NULL, BUS_DMA_WAITOK); |
809 | if (error) { |
			printf("%s: could not load DMA'able memory for "
			    "Rx CMB\n", device_xname(sc->sc_dev));
812 | bus_dmamem_free(sc->sc_dmat, |
813 | &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1); |
814 | return error; |
815 | } |
816 | sc->ale_cdata.ale_rx_page[i].cmb_paddr = |
817 | sc->ale_cdata.ale_rx_page[i].cmb_map->dm_segs[0].ds_addr; |
818 | } |
819 | |
820 | |
821 | /* Create DMA maps for Tx buffers. */ |
822 | for (i = 0; i < ALE_TX_RING_CNT; i++) { |
823 | txd = &sc->ale_cdata.ale_txdesc[i]; |
824 | txd->tx_m = NULL; |
825 | txd->tx_dmamap = NULL; |
826 | error = bus_dmamap_create(sc->sc_dmat, ALE_TSO_MAXSIZE, |
827 | ALE_MAXTXSEGS, ALE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, |
828 | &txd->tx_dmamap); |
829 | if (error) { |
830 | txd->tx_dmamap = NULL; |
			printf("%s: could not create Tx dmamap.\n",
832 | device_xname(sc->sc_dev)); |
833 | return error; |
834 | } |
835 | } |
836 | |
837 | return 0; |
838 | } |
839 | |
840 | static void |
841 | ale_dma_free(struct ale_softc *sc) |
842 | { |
843 | struct ale_txdesc *txd; |
844 | int i; |
845 | |
846 | /* Tx buffers. */ |
847 | for (i = 0; i < ALE_TX_RING_CNT; i++) { |
848 | txd = &sc->ale_cdata.ale_txdesc[i]; |
849 | if (txd->tx_dmamap != NULL) { |
850 | bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); |
851 | txd->tx_dmamap = NULL; |
852 | } |
853 | } |
854 | |
855 | /* Tx descriptor ring. */ |
856 | if (sc->ale_cdata.ale_tx_ring_map != NULL) |
857 | bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map); |
858 | if (sc->ale_cdata.ale_tx_ring_map != NULL && |
859 | sc->ale_cdata.ale_tx_ring != NULL) |
860 | bus_dmamem_free(sc->sc_dmat, |
861 | &sc->ale_cdata.ale_tx_ring_seg, 1); |
862 | sc->ale_cdata.ale_tx_ring = NULL; |
863 | sc->ale_cdata.ale_tx_ring_map = NULL; |
864 | |
865 | /* Rx page block. */ |
866 | for (i = 0; i < ALE_RX_PAGES; i++) { |
867 | if (sc->ale_cdata.ale_rx_page[i].page_map != NULL) |
868 | bus_dmamap_unload(sc->sc_dmat, |
869 | sc->ale_cdata.ale_rx_page[i].page_map); |
870 | if (sc->ale_cdata.ale_rx_page[i].page_map != NULL && |
871 | sc->ale_cdata.ale_rx_page[i].page_addr != NULL) |
872 | bus_dmamem_free(sc->sc_dmat, |
873 | &sc->ale_cdata.ale_rx_page[i].page_seg, 1); |
874 | sc->ale_cdata.ale_rx_page[i].page_addr = NULL; |
875 | sc->ale_cdata.ale_rx_page[i].page_map = NULL; |
876 | } |
877 | |
878 | /* Rx CMB. */ |
879 | for (i = 0; i < ALE_RX_PAGES; i++) { |
880 | if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL) |
881 | bus_dmamap_unload(sc->sc_dmat, |
882 | sc->ale_cdata.ale_rx_page[i].cmb_map); |
883 | if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL && |
884 | sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL) |
885 | bus_dmamem_free(sc->sc_dmat, |
886 | &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1); |
887 | sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL; |
888 | sc->ale_cdata.ale_rx_page[i].cmb_map = NULL; |
889 | } |
890 | |
891 | /* Tx CMB. */ |
892 | if (sc->ale_cdata.ale_tx_cmb_map != NULL) |
893 | bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map); |
894 | if (sc->ale_cdata.ale_tx_cmb_map != NULL && |
895 | sc->ale_cdata.ale_tx_cmb != NULL) |
896 | bus_dmamem_free(sc->sc_dmat, |
897 | &sc->ale_cdata.ale_tx_cmb_seg, 1); |
898 | sc->ale_cdata.ale_tx_cmb = NULL; |
899 | sc->ale_cdata.ale_tx_cmb_map = NULL; |
900 | |
901 | } |
902 | |
903 | static int |
904 | ale_encap(struct ale_softc *sc, struct mbuf **m_head) |
905 | { |
906 | struct ale_txdesc *txd, *txd_last; |
907 | struct tx_desc *desc; |
908 | struct mbuf *m; |
909 | bus_dmamap_t map; |
910 | uint32_t cflags, poff, vtag; |
911 | int error, i, nsegs, prod; |
912 | #if NVLAN > 0 |
913 | struct m_tag *mtag; |
914 | #endif |
915 | |
916 | m = *m_head; |
917 | cflags = vtag = 0; |
918 | poff = 0; |
919 | |
920 | prod = sc->ale_cdata.ale_tx_prod; |
921 | txd = &sc->ale_cdata.ale_txdesc[prod]; |
922 | txd_last = txd; |
923 | map = txd->tx_dmamap; |
924 | |
925 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); |
926 | if (error == EFBIG) { |
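		/*
		 * EFBIG means the mbuf chain has more segments than
		 * the map allows; m_pullup(9) is used here as a cheap
		 * way to compact the chain before retrying the load.
		 */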
927 | error = 0; |
928 | |
929 | *m_head = m_pullup(*m_head, MHLEN); |
930 | if (*m_head == NULL) { |
			printf("%s: can't defrag TX mbuf\n",
932 | device_xname(sc->sc_dev)); |
933 | return ENOBUFS; |
934 | } |
935 | |
936 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, |
937 | BUS_DMA_NOWAIT); |
938 | |
939 | if (error != 0) { |
			printf("%s: could not load defragged TX mbuf\n",
941 | device_xname(sc->sc_dev)); |
942 | m_freem(*m_head); |
943 | *m_head = NULL; |
944 | return error; |
945 | } |
946 | } else if (error) { |
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
948 | return error; |
949 | } |
950 | |
951 | nsegs = map->dm_nsegs; |
952 | |
953 | if (nsegs == 0) { |
954 | m_freem(*m_head); |
955 | *m_head = NULL; |
956 | return EIO; |
957 | } |
958 | |
959 | /* Check descriptor overrun. */ |
960 | if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 2) { |
961 | bus_dmamap_unload(sc->sc_dmat, map); |
962 | return ENOBUFS; |
963 | } |
964 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, |
965 | BUS_DMASYNC_PREWRITE); |
966 | |
967 | m = *m_head; |
968 | /* Configure Tx checksum offload. */ |
969 | if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) { |
970 | /* |
971 | * AR81xx supports Tx custom checksum offload feature |
972 | * that offloads single 16bit checksum computation. |
973 | * So you can choose one among IP, TCP and UDP. |
974 | * Normally driver sets checksum start/insertion |
975 | * position from the information of TCP/UDP frame as |
976 | * TCP/UDP checksum takes more time than that of IP. |
977 | * However it seems that custom checksum offload |
978 | * requires 4 bytes aligned Tx buffers due to hardware |
979 | * bug. |
980 | * AR81xx also supports explicit Tx checksum computation |
981 | * if it is told that the size of IP header and TCP |
982 | * header(for UDP, the header size does not matter |
983 | * because it's fixed length). However with this scheme |
984 | * TSO does not work so you have to choose one either |
985 | * TSO or explicit Tx checksum offload. I chosen TSO |
986 | * plus custom checksum offload with work-around which |
987 | * will cover most common usage for this consumer |
988 | * ethernet controller. The work-around takes a lot of |
989 | * CPU cycles if Tx buffer is not aligned on 4 bytes |
990 | * boundary, though. |
991 | */ |
992 | cflags |= ALE_TD_CXSUM; |
993 | /* Set checksum start offset. */ |
994 | cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT); |
995 | } |
996 | |
997 | #if NVLAN > 0 |
998 | /* Configure VLAN hardware tag insertion. */ |
999 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) { |
1000 | vtag = ALE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag))); |
1001 | vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK); |
1002 | cflags |= ALE_TD_INSERT_VLAN_TAG; |
1003 | } |
1004 | #endif |
1005 | |
1006 | desc = NULL; |
1007 | for (i = 0; i < nsegs; i++) { |
1008 | desc = &sc->ale_cdata.ale_tx_ring[prod]; |
1009 | desc->addr = htole64(map->dm_segs[i].ds_addr); |
1010 | desc->len = |
1011 | htole32(ALE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); |
1012 | desc->flags = htole32(cflags); |
1013 | sc->ale_cdata.ale_tx_cnt++; |
1014 | ALE_DESC_INC(prod, ALE_TX_RING_CNT); |
1015 | } |
1016 | /* Update producer index. */ |
1017 | sc->ale_cdata.ale_tx_prod = prod; |
1018 | |
1019 | /* Finally set EOP on the last descriptor. */ |
1020 | prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT; |
1021 | desc = &sc->ale_cdata.ale_tx_ring[prod]; |
1022 | desc->flags |= htole32(ALE_TD_EOP); |
1023 | |
	/*
	 * Swap the dmamaps of the first and the last descriptor so
	 * that the loaded map stays with the slot that records the
	 * mbuf; ale_txeof() then unloads the map that was actually
	 * loaded for this chain.
	 */
1025 | txd = &sc->ale_cdata.ale_txdesc[prod]; |
1026 | map = txd_last->tx_dmamap; |
1027 | txd_last->tx_dmamap = txd->tx_dmamap; |
1028 | txd->tx_dmamap = map; |
1029 | txd->tx_m = m; |
1030 | |
1031 | /* Sync descriptors. */ |
1032 | bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0, |
1033 | sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1034 | |
1035 | return 0; |
1036 | } |
1037 | |
1038 | static void |
1039 | ale_start(struct ifnet *ifp) |
1040 | { |
1041 | struct ale_softc *sc = ifp->if_softc; |
1042 | struct mbuf *m_head; |
1043 | int enq; |
1044 | |
1045 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
1046 | return; |
1047 | |
1048 | /* Reclaim transmitted frames. */ |
1049 | if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT) |
1050 | ale_txeof(sc); |
1051 | |
1052 | enq = 0; |
1053 | for (;;) { |
1054 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
1055 | if (m_head == NULL) |
1056 | break; |
1057 | |
1058 | /* |
1059 | * Pack the data into the transmit ring. If we |
1060 | * don't have room, set the OACTIVE flag and wait |
1061 | * for the NIC to drain the ring. |
1062 | */ |
1063 | if (ale_encap(sc, &m_head)) { |
1064 | if (m_head == NULL) |
1065 | break; |
1066 | IF_PREPEND(&ifp->if_snd, m_head); |
1067 | ifp->if_flags |= IFF_OACTIVE; |
1068 | break; |
1069 | } |
1070 | enq = 1; |
1071 | |
1072 | /* |
1073 | * If there's a BPF listener, bounce a copy of this frame |
1074 | * to him. |
1075 | */ |
1076 | bpf_mtap(ifp, m_head); |
1077 | } |
1078 | |
1079 | if (enq) { |
1080 | /* Kick. */ |
1081 | CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX, |
1082 | sc->ale_cdata.ale_tx_prod); |
1083 | |
1084 | /* Set a timeout in case the chip goes out to lunch. */ |
1085 | ifp->if_timer = ALE_TX_TIMEOUT; |
1086 | } |
1087 | } |
1088 | |
1089 | static void |
1090 | ale_watchdog(struct ifnet *ifp) |
1091 | { |
1092 | struct ale_softc *sc = ifp->if_softc; |
1093 | |
1094 | if ((sc->ale_flags & ALE_FLAG_LINK) == 0) { |
		printf("%s: watchdog timeout (missed link)\n",
1096 | device_xname(sc->sc_dev)); |
1097 | ifp->if_oerrors++; |
1098 | ale_init(ifp); |
1099 | return; |
1100 | } |
1101 | |
	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1103 | ifp->if_oerrors++; |
1104 | ale_init(ifp); |
1105 | |
1106 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
1107 | ale_start(ifp); |
1108 | } |
1109 | |
1110 | static int |
1111 | ale_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
1112 | { |
1113 | struct ale_softc *sc = ifp->if_softc; |
1114 | int s, error; |
1115 | |
1116 | s = splnet(); |
1117 | |
1118 | error = ether_ioctl(ifp, cmd, data); |
1119 | if (error == ENETRESET) { |
1120 | if (ifp->if_flags & IFF_RUNNING) |
1121 | ale_rxfilter(sc); |
1122 | error = 0; |
1123 | } |
1124 | |
1125 | splx(s); |
1126 | return error; |
1127 | } |
1128 | |
1129 | static void |
1130 | ale_mac_config(struct ale_softc *sc) |
1131 | { |
1132 | struct mii_data *mii; |
1133 | uint32_t reg; |
1134 | |
1135 | mii = &sc->sc_miibus; |
1136 | reg = CSR_READ_4(sc, ALE_MAC_CFG); |
1137 | reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | |
1138 | MAC_CFG_SPEED_MASK); |
1139 | |
1140 | /* Reprogram MAC with resolved speed/duplex. */ |
1141 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
1142 | case IFM_10_T: |
1143 | case IFM_100_TX: |
1144 | reg |= MAC_CFG_SPEED_10_100; |
1145 | break; |
1146 | case IFM_1000_T: |
1147 | reg |= MAC_CFG_SPEED_1000; |
1148 | break; |
1149 | } |
1150 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { |
1151 | reg |= MAC_CFG_FULL_DUPLEX; |
1152 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) |
1153 | reg |= MAC_CFG_TX_FC; |
1154 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) |
1155 | reg |= MAC_CFG_RX_FC; |
1156 | } |
1157 | CSR_WRITE_4(sc, ALE_MAC_CFG, reg); |
1158 | } |
1159 | |
1160 | static void |
1161 | ale_stats_clear(struct ale_softc *sc) |
1162 | { |
1163 | struct smb sb; |
1164 | uint32_t *reg; |
1165 | int i; |
1166 | |
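	/* Read and discard Rx statistics to clear the counters. */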
1167 | for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) { |
1168 | CSR_READ_4(sc, ALE_RX_MIB_BASE + i); |
1169 | i += sizeof(uint32_t); |
1170 | } |
1171 | /* Read Tx statistics. */ |
1172 | for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) { |
1173 | CSR_READ_4(sc, ALE_TX_MIB_BASE + i); |
1174 | i += sizeof(uint32_t); |
1175 | } |
1176 | } |
1177 | |
1178 | static void |
1179 | ale_stats_update(struct ale_softc *sc) |
1180 | { |
1181 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1182 | struct ale_hw_stats *stat; |
1183 | struct smb sb, *smb; |
1184 | uint32_t *reg; |
1185 | int i; |
1186 | |
1187 | stat = &sc->ale_stats; |
1188 | smb = &sb; |
1189 | |
1190 | /* Read Rx statistics. */ |
1191 | for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) { |
1192 | *reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i); |
1193 | i += sizeof(uint32_t); |
1194 | } |
1195 | /* Read Tx statistics. */ |
1196 | for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) { |
1197 | *reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i); |
1198 | i += sizeof(uint32_t); |
1199 | } |
1200 | |
1201 | /* Rx stats. */ |
1202 | stat->rx_frames += smb->rx_frames; |
1203 | stat->rx_bcast_frames += smb->rx_bcast_frames; |
1204 | stat->rx_mcast_frames += smb->rx_mcast_frames; |
1205 | stat->rx_pause_frames += smb->rx_pause_frames; |
1206 | stat->rx_control_frames += smb->rx_control_frames; |
1207 | stat->rx_crcerrs += smb->rx_crcerrs; |
1208 | stat->rx_lenerrs += smb->rx_lenerrs; |
1209 | stat->rx_bytes += smb->rx_bytes; |
1210 | stat->rx_runts += smb->rx_runts; |
1211 | stat->rx_fragments += smb->rx_fragments; |
1212 | stat->rx_pkts_64 += smb->rx_pkts_64; |
1213 | stat->rx_pkts_65_127 += smb->rx_pkts_65_127; |
1214 | stat->rx_pkts_128_255 += smb->rx_pkts_128_255; |
1215 | stat->rx_pkts_256_511 += smb->rx_pkts_256_511; |
1216 | stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; |
1217 | stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; |
1218 | stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; |
1219 | stat->rx_pkts_truncated += smb->rx_pkts_truncated; |
1220 | stat->rx_fifo_oflows += smb->rx_fifo_oflows; |
1221 | stat->rx_rrs_errs += smb->rx_rrs_errs; |
1222 | stat->rx_alignerrs += smb->rx_alignerrs; |
1223 | stat->rx_bcast_bytes += smb->rx_bcast_bytes; |
1224 | stat->rx_mcast_bytes += smb->rx_mcast_bytes; |
1225 | stat->rx_pkts_filtered += smb->rx_pkts_filtered; |
1226 | |
1227 | /* Tx stats. */ |
1228 | stat->tx_frames += smb->tx_frames; |
1229 | stat->tx_bcast_frames += smb->tx_bcast_frames; |
1230 | stat->tx_mcast_frames += smb->tx_mcast_frames; |
1231 | stat->tx_pause_frames += smb->tx_pause_frames; |
1232 | stat->tx_excess_defer += smb->tx_excess_defer; |
1233 | stat->tx_control_frames += smb->tx_control_frames; |
1234 | stat->tx_deferred += smb->tx_deferred; |
1235 | stat->tx_bytes += smb->tx_bytes; |
1236 | stat->tx_pkts_64 += smb->tx_pkts_64; |
1237 | stat->tx_pkts_65_127 += smb->tx_pkts_65_127; |
1238 | stat->tx_pkts_128_255 += smb->tx_pkts_128_255; |
1239 | stat->tx_pkts_256_511 += smb->tx_pkts_256_511; |
1240 | stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; |
1241 | stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; |
1242 | stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; |
1243 | stat->tx_single_colls += smb->tx_single_colls; |
1244 | stat->tx_multi_colls += smb->tx_multi_colls; |
1245 | stat->tx_late_colls += smb->tx_late_colls; |
1246 | stat->tx_excess_colls += smb->tx_excess_colls; |
1247 | stat->tx_abort += smb->tx_abort; |
1248 | stat->tx_underrun += smb->tx_underrun; |
1249 | stat->tx_desc_underrun += smb->tx_desc_underrun; |
1250 | stat->tx_lenerrs += smb->tx_lenerrs; |
1251 | stat->tx_pkts_truncated += smb->tx_pkts_truncated; |
1252 | stat->tx_bcast_bytes += smb->tx_bcast_bytes; |
1253 | stat->tx_mcast_bytes += smb->tx_mcast_bytes; |
1254 | |
1255 | /* Update counters in ifnet. */ |
1256 | ifp->if_opackets += smb->tx_frames; |
1257 | |
1258 | ifp->if_collisions += smb->tx_single_colls + |
1259 | smb->tx_multi_colls * 2 + smb->tx_late_colls + |
1260 | smb->tx_abort * HDPX_CFG_RETRY_DEFAULT; |
1261 | |
1262 | /* |
1263 | * XXX |
1264 | * tx_pkts_truncated counter looks suspicious. It constantly |
1265 | * increments with no sign of Tx errors. This may indicate |
1266 | * the counter name is not correct one so I've removed the |
1267 | * counter in output errors. |
1268 | */ |
1269 | ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls + |
1270 | smb->tx_underrun; |
1271 | |
1272 | ifp->if_ipackets += smb->rx_frames; |
1273 | |
1274 | ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + |
1275 | smb->rx_runts + smb->rx_pkts_truncated + |
1276 | smb->rx_fifo_oflows + smb->rx_rrs_errs + |
1277 | smb->rx_alignerrs; |
1278 | } |
1279 | |
1280 | static int |
1281 | ale_intr(void *xsc) |
1282 | { |
1283 | struct ale_softc *sc = xsc; |
1284 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1285 | uint32_t status; |
1286 | |
1287 | status = CSR_READ_4(sc, ALE_INTR_STATUS); |
1288 | if ((status & ALE_INTRS) == 0) |
1289 | return 0; |
1290 | |
1291 | /* Acknowledge and disable interrupts. */ |
1292 | CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT); |
1293 | |
1294 | if (ifp->if_flags & IFF_RUNNING) { |
1295 | int error; |
1296 | |
1297 | error = ale_rxeof(sc); |
1298 | if (error) { |
1299 | sc->ale_stats.reset_brk_seq++; |
1300 | ale_init(ifp); |
1301 | return 0; |
1302 | } |
1303 | |
1304 | if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { |
1305 | if (status & INTR_DMA_RD_TO_RST) |
				printf("%s: DMA read error! -- resetting\n",
1307 | device_xname(sc->sc_dev)); |
1308 | if (status & INTR_DMA_WR_TO_RST) |
				printf("%s: DMA write error! -- resetting\n",
1310 | device_xname(sc->sc_dev)); |
1311 | ale_init(ifp); |
1312 | return 0; |
1313 | } |
1314 | |
1315 | ale_txeof(sc); |
1316 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
1317 | ale_start(ifp); |
1318 | } |
1319 | |
1320 | /* Re-enable interrupts. */ |
1321 | CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF); |
1322 | return 1; |
1323 | } |
1324 | |
1325 | static void |
1326 | ale_txeof(struct ale_softc *sc) |
1327 | { |
1328 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1329 | struct ale_txdesc *txd; |
1330 | uint32_t cons, prod; |
1331 | int prog; |
1332 | |
1333 | if (sc->ale_cdata.ale_tx_cnt == 0) |
1334 | return; |
1335 | |
1336 | bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0, |
1337 | sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
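	/*
	 * Fetch the hardware Tx consumer index, either from the
	 * coherent Tx CMB or, when the CMB is unusable due to the
	 * Tx CMB bug, from the TPD consumer index register.
	 */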
1338 | if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) { |
1339 | bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0, |
1340 | sc->ale_cdata.ale_tx_cmb_map->dm_mapsize, |
1341 | BUS_DMASYNC_POSTREAD); |
1342 | prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK; |
1343 | } else |
1344 | prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX); |
1345 | cons = sc->ale_cdata.ale_tx_cons; |
1346 | /* |
1347 | * Go through our Tx list and free mbufs for those |
1348 | * frames which have been transmitted. |
1349 | */ |
1350 | for (prog = 0; cons != prod; prog++, |
1351 | ALE_DESC_INC(cons, ALE_TX_RING_CNT)) { |
1352 | if (sc->ale_cdata.ale_tx_cnt <= 0) |
1353 | break; |
1355 | ifp->if_flags &= ~IFF_OACTIVE; |
1356 | sc->ale_cdata.ale_tx_cnt--; |
1357 | txd = &sc->ale_cdata.ale_txdesc[cons]; |
1358 | if (txd->tx_m != NULL) { |
1359 | /* Reclaim transmitted mbufs. */ |
1360 | bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); |
1361 | m_freem(txd->tx_m); |
1362 | txd->tx_m = NULL; |
1363 | } |
1364 | } |
1365 | |
1366 | if (prog > 0) { |
1367 | sc->ale_cdata.ale_tx_cons = cons; |
1368 | /* |
1369 | * Unarm watchdog timer only when there is no pending |
1370 | * Tx descriptors in queue. |
1371 | */ |
1372 | if (sc->ale_cdata.ale_tx_cnt == 0) |
1373 | ifp->if_timer = 0; |
1374 | } |
1375 | } |
1376 | |
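/*
 * Advance the consumer position within the current Rx page; once the
 * whole page has been consumed, hand the page back to the hardware
 * and flip to the alternate Rx page.
 */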
1377 | static void |
1378 | ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page, |
1379 | uint32_t length, uint32_t *prod) |
1380 | { |
1381 | struct ale_rx_page *rx_page; |
1382 | |
1383 | rx_page = *page; |
1384 | /* Update consumer position. */ |
1385 | rx_page->cons += roundup(length + sizeof(struct rx_rs), |
1386 | ALE_RX_PAGE_ALIGN); |
1387 | if (rx_page->cons >= ALE_RX_PAGE_SZ) { |
1388 | /* |
1389 | * End of Rx page reached, let hardware reuse |
1390 | * this page. |
1391 | */ |
1392 | rx_page->cons = 0; |
1393 | *rx_page->cmb_addr = 0; |
1394 | bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0, |
1395 | rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1396 | CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp, |
1397 | RXF_VALID); |
1398 | /* Switch to alternate Rx page. */ |
1399 | sc->ale_cdata.ale_rx_curp ^= 1; |
1400 | rx_page = *page = |
1401 | &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp]; |
1402 | /* Page flipped, sync CMB and Rx page. */ |
1403 | bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0, |
1404 | rx_page->page_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
1405 | bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0, |
1406 | rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
1407 | /* Sync completed, cache updated producer index. */ |
1408 | *prod = *rx_page->cmb_addr; |
1409 | } |
1410 | } |
1411 | |
1412 | |
1413 | /* |
1414 | * It seems that AR81xx controller can compute partial checksum. |
1415 | * The partial checksum value can be used to accelerate checksum |
1416 | * computation for fragmented TCP/UDP packets. Upper network stack |
1417 | * already takes advantage of the partial checksum value in IP |
1418 | * reassembly stage. But I'm not sure the correctness of the |
1419 | * partial hardware checksum assistance due to lack of data sheet. |
1420 | * In addition, the Rx feature of controller that requires copying |
1421 | * for every frames effectively nullifies one of most nice offload |
1422 | * capability of controller. |
1423 | */ |
1424 | static void |
1425 | ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status) |
1426 | { |
1427 | if (status & ALE_RD_IPCSUM_NOK) |
1428 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; |
1429 | |
1430 | if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) { |
1431 | if (((status & ALE_RD_IPV4_FRAG) == 0) && |
1432 | ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) && |
1433 | (status & ALE_RD_TCP_UDPCSUM_NOK)) |
1434 | { |
1435 | m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; |
1436 | } |
1437 | } else { |
1438 | if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) { |
1439 | if (status & ALE_RD_TCP_UDPCSUM_NOK) { |
1440 | m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; |
1441 | } |
1442 | } |
1443 | } |
1444 | /* |
1445 | * Don't mark bad checksum for TCP/UDP frames |
1446 | * as fragmented frames may always have set |
1447 | * bad checksummed bit of frame status. |
1448 | */ |
1449 | } |
1450 | |
1451 | /* Process received frames. */ |
1452 | static int |
1453 | ale_rxeof(struct ale_softc *sc) |
1454 | { |
1455 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1456 | struct ale_rx_page *rx_page; |
1457 | struct rx_rs *rs; |
1458 | struct mbuf *m; |
1459 | uint32_t length, prod, seqno, status; |
1460 | int prog; |
1461 | |
1462 | rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp]; |
1463 | bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0, |
1464 | rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
1465 | bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0, |
1466 | rx_page->page_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
1467 | /* |
1468 | * Don't directly access producer index as hardware may |
1469 | * update it while Rx handler is in progress. It would |
1470 | * be even better if there is a way to let hardware |
1471 | * know how far driver processed its received frames. |
1472 | * Alternatively, hardware could provide a way to disable |
1473 | * CMB updates until driver acknowledges the end of CMB |
1474 | * access. |
1475 | */ |
1476 | prod = *rx_page->cmb_addr; |
1477 | for (prog = 0; ; prog++) { |
1478 | if (rx_page->cons >= prod) |
1479 | break; |
1480 | rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons); |
1481 | seqno = ALE_RX_SEQNO(le32toh(rs->seqno)); |
1482 | if (sc->ale_cdata.ale_rx_seqno != seqno) { |
1483 | /* |
1484 | * Normally I believe this should not happen unless |
1485 | * severe driver bug or corrupted memory. However |
1486 | * it seems to happen under certain conditions which |
1487 | * is triggered by abrupt Rx events such as initiation |
1488 | * of bulk transfer of remote host. It's not easy to |
1489 | * reproduce this and I doubt it could be related |
1490 | * with FIFO overflow of hardware or activity of Tx |
1491 | * CMB updates. I also remember similar behaviour |
1492 | * seen on RealTek 8139 which uses resembling Rx |
1493 | * scheme. |
1494 | */ |
1495 | if (aledebug) |
				printf("%s: garbled seq: %u, expected: %u -- "
				    "resetting!\n", device_xname(sc->sc_dev),
1498 | seqno, sc->ale_cdata.ale_rx_seqno); |
1499 | return EIO; |
1500 | } |
1501 | /* Frame received. */ |
1502 | sc->ale_cdata.ale_rx_seqno++; |
1503 | length = ALE_RX_BYTES(le32toh(rs->length)); |
1504 | status = le32toh(rs->flags); |
1505 | if (status & ALE_RD_ERROR) { |
1506 | /* |
1507 | * We want to pass the following frames to upper |
1508 | * layer regardless of error status of Rx return |
1509 | * status. |
1510 | * |
1511 | * o IP/TCP/UDP checksum is bad. |
1512 | * o frame length and protocol specific length |
1513 | * does not match. |
1514 | */ |
1515 | if (status & (ALE_RD_CRC | ALE_RD_CODE | |
1516 | ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW | |
1517 | ALE_RD_TRUNC)) { |
1518 | ale_rx_update_page(sc, &rx_page, length, &prod); |
1519 | continue; |
1520 | } |
1521 | } |
1522 | /* |
1523 | * m_devget(9) is major bottle-neck of ale(4)(It comes |
1524 | * from hardware limitation). For jumbo frames we could |
1525 | * get a slightly better performance if driver use |
1526 | * m_getjcl(9) with proper buffer size argument. However |
1527 | * that would make code more complicated and I don't |
1528 | * think users would expect good Rx performance numbers |
1529 | * on these low-end consumer ethernet controller. |
1530 | */ |
1531 | m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN, |
1532 | 0, ifp, NULL); |
1533 | if (m == NULL) { |
1534 | ifp->if_iqdrops++; |
1535 | ale_rx_update_page(sc, &rx_page, length, &prod); |
1536 | continue; |
1537 | } |
1538 | if (status & ALE_RD_IPV4) |
1539 | ale_rxcsum(sc, m, status); |
1540 | #if NVLAN > 0 |
1541 | if (status & ALE_RD_VLAN) { |
1542 | uint32_t vtags = ALE_RX_VLAN(le32toh(rs->vtags)); |
1543 | VLAN_INPUT_TAG(ifp, m, ALE_RX_VLAN_TAG(vtags), ); |
1544 | } |
1545 | #endif |
1546 | |
1547 | |
1548 | bpf_mtap(ifp, m); |
1549 | |
1550 | /* Pass it to upper layer. */ |
1551 | if_percpuq_enqueue(ifp->if_percpuq, m); |
1552 | |
1553 | ale_rx_update_page(sc, &rx_page, length, &prod); |
1554 | } |
1555 | |
1556 | return 0; |
1557 | } |
1558 | |
1559 | static void |
1560 | ale_tick(void *xsc) |
1561 | { |
1562 | struct ale_softc *sc = xsc; |
1563 | struct mii_data *mii = &sc->sc_miibus; |
1564 | int s; |
1565 | |
1566 | s = splnet(); |
1567 | mii_tick(mii); |
1568 | ale_stats_update(sc); |
1569 | splx(s); |
1570 | |
1571 | callout_schedule(&sc->sc_tick_ch, hz); |
1572 | } |
1573 | |
1574 | static void |
1575 | ale_reset(struct ale_softc *sc) |
1576 | { |
1577 | uint32_t reg; |
1578 | int i; |
1579 | |
1580 | /* Initialize PCIe module. From Linux. */ |
1581 | CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); |
1582 | |
1583 | CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET); |
1584 | for (i = ALE_RESET_TIMEOUT; i > 0; i--) { |
1585 | DELAY(10); |
1586 | if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0) |
1587 | break; |
1588 | } |
1589 | if (i == 0) |
		printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
1591 | |
1592 | for (i = ALE_RESET_TIMEOUT; i > 0; i--) { |
1593 | if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0) |
1594 | break; |
1595 | DELAY(10); |
1596 | } |
1597 | |
1598 | if (i == 0) |
		printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
1600 | reg); |
1601 | } |
1602 | |
1603 | static int |
1604 | ale_init(struct ifnet *ifp) |
1605 | { |
1606 | struct ale_softc *sc = ifp->if_softc; |
1607 | struct mii_data *mii; |
1608 | uint8_t eaddr[ETHER_ADDR_LEN]; |
1609 | bus_addr_t paddr; |
1610 | uint32_t reg, rxf_hi, rxf_lo; |
1611 | |
1612 | /* |
1613 | * Cancel any pending I/O. |
1614 | */ |
1615 | ale_stop(ifp, 0); |
1616 | |
1617 | /* |
1618 | * Reset the chip to a known state. |
1619 | */ |
1620 | ale_reset(sc); |
1621 | |
1622 | /* Initialize Tx descriptors, DMA memory blocks. */ |
1623 | ale_init_rx_pages(sc); |
1624 | ale_init_tx_ring(sc); |
1625 | |
1626 | /* Reprogram the station address. */ |
1627 | memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); |
1628 | CSR_WRITE_4(sc, ALE_PAR0, |
1629 | eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); |
1630 | CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]); |
1631 | |
1632 | /* |
1633 | * Clear WOL status and disable all WOL feature as WOL |
1634 | * would interfere Rx operation under normal environments. |
1635 | */ |
1636 | CSR_READ_4(sc, ALE_WOL_CFG); |
1637 | CSR_WRITE_4(sc, ALE_WOL_CFG, 0); |
1638 | |
1639 | /* |
1640 | * Set Tx descriptor/RXF0/CMB base addresses. They share |
1641 | * the same high address part of DMAable region. |
1642 | */ |
1643 | paddr = sc->ale_cdata.ale_tx_ring_paddr; |
1644 | CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr)); |
1645 | CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr)); |
1646 | CSR_WRITE_4(sc, ALE_TPD_CNT, |
1647 | (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK); |
1648 | |
	/* Set the Rx page base addresses; note that we use a single queue. */
1650 | paddr = sc->ale_cdata.ale_rx_page[0].page_paddr; |
1651 | CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr)); |
1652 | paddr = sc->ale_cdata.ale_rx_page[1].page_paddr; |
1653 | CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr)); |
1654 | |
1655 | /* Set Tx/Rx CMB addresses. */ |
1656 | paddr = sc->ale_cdata.ale_tx_cmb_paddr; |
1657 | CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr)); |
1658 | paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr; |
1659 | CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr)); |
1660 | paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr; |
1661 | CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr)); |
1662 | |
	/* Mark RXF0 as valid. */
1664 | CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID); |
1665 | CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID); |
1666 | /* |
1667 | * No need to initialize RFX1/RXF2/RXF3. We don't use |
1668 | * multi-queue yet. |
1669 | */ |
1670 | |
	/* Set the Rx page size, excluding the guard frame size. */
1672 | CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ); |
1673 | |
1674 | /* Tell hardware that we're ready to load DMA blocks. */ |
1675 | CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD); |
1676 | |
1677 | /* Set Rx/Tx interrupt trigger threshold. */ |
1678 | CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) | |
1679 | (4 << INT_TRIG_TX_THRESH_SHIFT)); |
1680 | /* |
1681 | * XXX |
1682 | * Set interrupt trigger timer, its purpose and relation |
1683 | * with interrupt moderation mechanism is not clear yet. |
1684 | */ |
1685 | CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER, |
1686 | ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) | |
1687 | (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT))); |
1688 | |
1689 | /* Configure interrupt moderation timer. */ |
1690 | sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT; |
1691 | sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT; |
1692 | reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT; |
1693 | reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT; |
1694 | CSR_WRITE_4(sc, ALE_IM_TIMER, reg); |
1695 | reg = CSR_READ_4(sc, ALE_MASTER_CFG); |
1696 | reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK); |
1697 | reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); |
1698 | if (ALE_USECS(sc->ale_int_rx_mod) != 0) |
1699 | reg |= MASTER_IM_RX_TIMER_ENB; |
1700 | if (ALE_USECS(sc->ale_int_tx_mod) != 0) |
1701 | reg |= MASTER_IM_TX_TIMER_ENB; |
1702 | CSR_WRITE_4(sc, ALE_MASTER_CFG, reg); |
1703 | CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000)); |
1704 | |
	/* Set the maximum frame size of the controller. */
1706 | if (ifp->if_mtu < ETHERMTU) |
1707 | sc->ale_max_frame_size = ETHERMTU; |
1708 | else |
1709 | sc->ale_max_frame_size = ifp->if_mtu; |
	sc->ale_max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
1711 | CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size); |
1712 | |
1713 | /* Configure IPG/IFG parameters. */ |
1714 | CSR_WRITE_4(sc, ALE_IPG_IFG_CFG, |
1715 | ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) | |
1716 | ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | |
1717 | ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | |
1718 | ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK)); |
1719 | |
1720 | /* Set parameters for half-duplex media. */ |
1721 | CSR_WRITE_4(sc, ALE_HDPX_CFG, |
1722 | ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & |
1723 | HDPX_CFG_LCOL_MASK) | |
1724 | ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & |
1725 | HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | |
1726 | ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & |
1727 | HDPX_CFG_ABEBT_MASK) | |
1728 | ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & |
1729 | HDPX_CFG_JAMIPG_MASK)); |
1730 | |
1731 | /* Configure Tx jumbo frame parameters. */ |
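	/*
	 * The threshold register is in TX_JUMBO_THRESH_UNIT-byte units
	 * and is scaled down as the MTU grows, presumably so a single
	 * jumbo frame cannot monopolize the Tx FIFO.
	 */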
1732 | if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) { |
1733 | if (ifp->if_mtu < ETHERMTU) |
1734 | reg = sc->ale_max_frame_size; |
1735 | else if (ifp->if_mtu < 6 * 1024) |
1736 | reg = (sc->ale_max_frame_size * 2) / 3; |
1737 | else |
1738 | reg = sc->ale_max_frame_size / 2; |
1739 | CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH, |
1740 | roundup(reg, TX_JUMBO_THRESH_UNIT) >> |
1741 | TX_JUMBO_THRESH_UNIT_SHIFT); |
1742 | } |
1743 | |
1744 | /* Configure TxQ. */ |
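	/* The Tx FIFO burst size scales with the DMA read burst setting. */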
1745 | reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT)) |
1746 | << TXQ_CFG_TX_FIFO_BURST_SHIFT; |
1747 | reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & |
1748 | TXQ_CFG_TPD_BURST_MASK; |
1749 | CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB); |
1750 | |
1751 | /* Configure Rx jumbo frame & flow control parameters. */ |
1752 | if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) { |
1753 | reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT); |
1754 | CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH, |
1755 | (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) << |
1756 | RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) | |
1757 | ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) & |
1758 | RX_JUMBO_LKAH_MASK)); |
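		/*
		 * Program the flow-control watermarks at 70% and 30% of
		 * the Rx FIFO length: pause frames are asserted as the
		 * FIFO fills past the high mark and released once it
		 * drains below the low mark.
		 */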
1759 | reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN); |
1760 | rxf_hi = (reg * 7) / 10; |
		rxf_lo = (reg * 3) / 10;
1762 | CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH, |
1763 | ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & |
1764 | RX_FIFO_PAUSE_THRESH_LO_MASK) | |
1765 | ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & |
1766 | RX_FIFO_PAUSE_THRESH_HI_MASK)); |
1767 | } |
1768 | |
1769 | /* Disable RSS. */ |
1770 | CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0); |
1771 | CSR_WRITE_4(sc, ALE_RSS_CPU, 0); |
1772 | |
1773 | /* Configure RxQ. */ |
1774 | CSR_WRITE_4(sc, ALE_RXQ_CFG, |
1775 | RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); |
1776 | |
1777 | /* Configure DMA parameters. */ |
1778 | reg = 0; |
1779 | if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) |
1780 | reg |= DMA_CFG_TXCMB_ENB; |
1781 | CSR_WRITE_4(sc, ALE_DMA_CFG, |
1782 | DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 | |
1783 | sc->ale_dma_rd_burst | reg | |
1784 | sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB | |
1785 | ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & |
1786 | DMA_CFG_RD_DELAY_CNT_MASK) | |
1787 | ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & |
1788 | DMA_CFG_WR_DELAY_CNT_MASK)); |
1789 | |
1790 | /* |
1791 | * Hardware can be configured to issue SMB interrupt based |
1792 | * on programmed interval. Since there is a callout that is |
1793 | * invoked for every hz in driver we use that instead of |
1794 | * relying on periodic SMB interrupt. |
1795 | */ |
1796 | CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0)); |
1797 | |
1798 | /* Clear MAC statistics. */ |
1799 | ale_stats_clear(sc); |
1800 | |
1801 | /* |
1802 | * Configure Tx/Rx MACs. |
1803 | * - Auto-padding for short frames. |
1804 | * - Enable CRC generation. |
1805 | * Actual reconfiguration of MAC for resolved speed/duplex |
1806 | * is followed after detection of link establishment. |
1807 | * AR81xx always does checksum computation regardless of |
1808 | * MAC_CFG_RXCSUM_ENB bit. In fact, setting the bit will |
1809 | * cause Rx handling issue for fragmented IP datagrams due |
1810 | * to silicon bug. |
1811 | */ |
1812 | reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | |
1813 | ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & |
1814 | MAC_CFG_PREAMBLE_MASK); |
1815 | if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0) |
1816 | reg |= MAC_CFG_SPEED_10_100; |
1817 | else |
1818 | reg |= MAC_CFG_SPEED_1000; |
1819 | CSR_WRITE_4(sc, ALE_MAC_CFG, reg); |
1820 | |
1821 | /* Set up the receive filter. */ |
1822 | ale_rxfilter(sc); |
1823 | ale_rxvlan(sc); |
1824 | |
	/* Acknowledge all pending interrupts and clear them. */
1826 | CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS); |
1827 | CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF); |
1828 | CSR_WRITE_4(sc, ALE_INTR_STATUS, 0); |
1829 | |
1830 | sc->ale_flags &= ~ALE_FLAG_LINK; |
1831 | |
1832 | /* Switch to the current media. */ |
1833 | mii = &sc->sc_miibus; |
1834 | mii_mediachg(mii); |
1835 | |
1836 | callout_schedule(&sc->sc_tick_ch, hz); |
1837 | |
1838 | ifp->if_flags |= IFF_RUNNING; |
1839 | ifp->if_flags &= ~IFF_OACTIVE; |
1840 | |
1841 | return 0; |
1842 | } |
1843 | |
1844 | static void |
1845 | ale_stop(struct ifnet *ifp, int disable) |
1846 | { |
1847 | struct ale_softc *sc = ifp->if_softc; |
1848 | struct ale_txdesc *txd; |
1849 | uint32_t reg; |
1850 | int i; |
1851 | |
1852 | callout_stop(&sc->sc_tick_ch); |
1853 | |
1854 | /* |
1855 | * Mark the interface down and cancel the watchdog timer. |
1856 | */ |
1857 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1858 | ifp->if_timer = 0; |
1859 | |
1860 | sc->ale_flags &= ~ALE_FLAG_LINK; |
1861 | |
1862 | ale_stats_update(sc); |
1863 | |
1864 | mii_down(&sc->sc_miibus); |
1865 | |
1866 | /* Disable interrupts. */ |
1867 | CSR_WRITE_4(sc, ALE_INTR_MASK, 0); |
1868 | CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF); |
1869 | |
1870 | /* Disable queue processing and DMA. */ |
1871 | reg = CSR_READ_4(sc, ALE_TXQ_CFG); |
1872 | reg &= ~TXQ_CFG_ENB; |
1873 | CSR_WRITE_4(sc, ALE_TXQ_CFG, reg); |
1874 | reg = CSR_READ_4(sc, ALE_RXQ_CFG); |
1875 | reg &= ~RXQ_CFG_ENB; |
1876 | CSR_WRITE_4(sc, ALE_RXQ_CFG, reg); |
1877 | reg = CSR_READ_4(sc, ALE_DMA_CFG); |
1878 | reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB); |
1879 | CSR_WRITE_4(sc, ALE_DMA_CFG, reg); |
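	/* Give any in-flight DMA a moment to drain. */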
1880 | DELAY(1000); |
1881 | |
1882 | /* Stop Rx/Tx MACs. */ |
1883 | ale_stop_mac(sc); |
1884 | |
1885 | /* Disable interrupts again? XXX */ |
1886 | CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF); |
1887 | |
1888 | /* |
1889 | * Free TX mbufs still in the queues. |
1890 | */ |
1891 | for (i = 0; i < ALE_TX_RING_CNT; i++) { |
1892 | txd = &sc->ale_cdata.ale_txdesc[i]; |
1893 | if (txd->tx_m != NULL) { |
1894 | bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); |
1895 | m_freem(txd->tx_m); |
1896 | txd->tx_m = NULL; |
1897 | } |
1898 | } |
1899 | } |
1900 | |
1901 | static void |
1902 | ale_stop_mac(struct ale_softc *sc) |
1903 | { |
1904 | uint32_t reg; |
1905 | int i; |
1906 | |
1907 | reg = CSR_READ_4(sc, ALE_MAC_CFG); |
1908 | if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { |
1909 | reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); |
1910 | CSR_WRITE_4(sc, ALE_MAC_CFG, reg); |
1911 | } |
1912 | |
1913 | for (i = ALE_TIMEOUT; i > 0; i--) { |
1914 | reg = CSR_READ_4(sc, ALE_IDLE_STATUS); |
1915 | if (reg == 0) |
1916 | break; |
1917 | DELAY(10); |
1918 | } |
1919 | if (i == 0) |
1920 | printf("%s: could not disable Tx/Rx MAC(0x%08x)!\n" , |
1921 | device_xname(sc->sc_dev), reg); |
1922 | } |
1923 | |
1924 | static void |
1925 | ale_init_tx_ring(struct ale_softc *sc) |
1926 | { |
1927 | struct ale_txdesc *txd; |
1928 | int i; |
1929 | |
1930 | sc->ale_cdata.ale_tx_prod = 0; |
1931 | sc->ale_cdata.ale_tx_cons = 0; |
1932 | sc->ale_cdata.ale_tx_cnt = 0; |
1933 | |
1934 | memset(sc->ale_cdata.ale_tx_ring, 0, ALE_TX_RING_SZ); |
1935 | memset(sc->ale_cdata.ale_tx_cmb, 0, ALE_TX_CMB_SZ); |
1936 | for (i = 0; i < ALE_TX_RING_CNT; i++) { |
1937 | txd = &sc->ale_cdata.ale_txdesc[i]; |
1938 | txd->tx_m = NULL; |
1939 | } |
1940 | *sc->ale_cdata.ale_tx_cmb = 0; |
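	/* Flush the zeroed ring and CMB before the hardware reads them. */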
1941 | bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0, |
1942 | sc->ale_cdata.ale_tx_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1943 | bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0, |
1944 | sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1945 | } |
1946 | |
1947 | static void |
1948 | ale_init_rx_pages(struct ale_softc *sc) |
1949 | { |
1950 | struct ale_rx_page *rx_page; |
1951 | int i; |
1952 | |
1953 | sc->ale_cdata.ale_rx_seqno = 0; |
1954 | sc->ale_cdata.ale_rx_curp = 0; |
1955 | |
1956 | for (i = 0; i < ALE_RX_PAGES; i++) { |
1957 | rx_page = &sc->ale_cdata.ale_rx_page[i]; |
1958 | memset(rx_page->page_addr, 0, sc->ale_pagesize); |
1959 | memset(rx_page->cmb_addr, 0, ALE_RX_CMB_SZ); |
1960 | rx_page->cons = 0; |
1961 | *rx_page->cmb_addr = 0; |
1962 | bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0, |
1963 | rx_page->page_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1964 | bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0, |
1965 | rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1966 | } |
1967 | } |
1968 | |
1969 | static void |
1970 | ale_rxvlan(struct ale_softc *sc) |
1971 | { |
1972 | uint32_t reg; |
1973 | |
1974 | reg = CSR_READ_4(sc, ALE_MAC_CFG); |
1975 | reg &= ~MAC_CFG_VLAN_TAG_STRIP; |
1976 | if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) |
1977 | reg |= MAC_CFG_VLAN_TAG_STRIP; |
1978 | CSR_WRITE_4(sc, ALE_MAC_CFG, reg); |
1979 | } |
1980 | |
1981 | static void |
1982 | ale_rxfilter(struct ale_softc *sc) |
1983 | { |
1984 | struct ethercom *ec = &sc->sc_ec; |
1985 | struct ifnet *ifp = &ec->ec_if; |
1986 | struct ether_multi *enm; |
1987 | struct ether_multistep step; |
1988 | uint32_t crc; |
1989 | uint32_t mchash[2]; |
1990 | uint32_t rxcfg; |
1991 | |
1992 | rxcfg = CSR_READ_4(sc, ALE_MAC_CFG); |
1993 | rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); |
1994 | ifp->if_flags &= ~IFF_ALLMULTI; |
1995 | |
1996 | /* |
1997 | * Always accept broadcast frames. |
1998 | */ |
1999 | rxcfg |= MAC_CFG_BCAST; |
2000 | |
2001 | if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { |
2002 | ifp->if_flags |= IFF_ALLMULTI; |
2003 | if (ifp->if_flags & IFF_PROMISC) |
2004 | rxcfg |= MAC_CFG_PROMISC; |
2005 | else |
2006 | rxcfg |= MAC_CFG_ALLMULTI; |
2007 | mchash[0] = mchash[1] = 0xFFFFFFFF; |
2008 | } else { |
2009 | /* Program new filter. */ |
2010 | memset(mchash, 0, sizeof(mchash)); |
2011 | |
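		/*
		 * The hash filter is a 64-bit table split across MAR0 and
		 * MAR1: the top six bits of the big-endian CRC of each
		 * address select one bit, with CRC bit 31 choosing the
		 * 32-bit word and bits 30-26 the bit within it.
		 */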
2012 | ETHER_FIRST_MULTI(step, ec, enm); |
2013 | while (enm != NULL) { |
2014 | crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); |
2015 | mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); |
2016 | ETHER_NEXT_MULTI(step, enm); |
2017 | } |
2018 | } |
2019 | |
2020 | CSR_WRITE_4(sc, ALE_MAR0, mchash[0]); |
2021 | CSR_WRITE_4(sc, ALE_MAR1, mchash[1]); |
2022 | CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg); |
2023 | } |
2024 | |