1 | /* $NetBSD: if_age.c,v 1.48 2016/06/10 13:27:14 ozaki-r Exp $ */ |
2 | /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ |
3 | |
4 | /*- |
5 | * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> |
6 | * All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions |
10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice unmodified, this list of conditions, and the following |
13 | * disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. |
17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
22 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 | * SUCH DAMAGE. |
29 | */ |
30 | |
31 | /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ |
32 | |
33 | #include <sys/cdefs.h> |
34 | __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.48 2016/06/10 13:27:14 ozaki-r Exp $" ); |
35 | |
36 | #include "vlan.h" |
37 | |
38 | #include <sys/param.h> |
39 | #include <sys/proc.h> |
40 | #include <sys/endian.h> |
41 | #include <sys/systm.h> |
42 | #include <sys/types.h> |
43 | #include <sys/sockio.h> |
44 | #include <sys/mbuf.h> |
45 | #include <sys/queue.h> |
46 | #include <sys/kernel.h> |
47 | #include <sys/device.h> |
48 | #include <sys/callout.h> |
49 | #include <sys/socket.h> |
50 | |
51 | #include <net/if.h> |
52 | #include <net/if_dl.h> |
53 | #include <net/if_media.h> |
54 | #include <net/if_ether.h> |
55 | |
56 | #ifdef INET |
57 | #include <netinet/in.h> |
58 | #include <netinet/in_systm.h> |
59 | #include <netinet/in_var.h> |
60 | #include <netinet/ip.h> |
61 | #endif |
62 | |
63 | #include <net/if_types.h> |
64 | #include <net/if_vlanvar.h> |
65 | |
66 | #include <net/bpf.h> |
67 | |
68 | #include <dev/mii/mii.h> |
69 | #include <dev/mii/miivar.h> |
70 | |
71 | #include <dev/pci/pcireg.h> |
72 | #include <dev/pci/pcivar.h> |
73 | #include <dev/pci/pcidevs.h> |
74 | |
75 | #include <dev/pci/if_agereg.h> |
76 | |
77 | static int age_match(device_t, cfdata_t, void *); |
78 | static void age_attach(device_t, device_t, void *); |
79 | static int age_detach(device_t, int); |
80 | |
81 | static bool age_resume(device_t, const pmf_qual_t *); |
82 | |
83 | static int age_miibus_readreg(device_t, int, int); |
84 | static void age_miibus_writereg(device_t, int, int, int); |
85 | static void age_miibus_statchg(struct ifnet *); |
86 | |
87 | static int age_init(struct ifnet *); |
88 | static int age_ioctl(struct ifnet *, u_long, void *); |
89 | static void age_start(struct ifnet *); |
90 | static void age_watchdog(struct ifnet *); |
91 | static bool age_shutdown(device_t, int); |
92 | static void age_mediastatus(struct ifnet *, struct ifmediareq *); |
93 | static int age_mediachange(struct ifnet *); |
94 | |
95 | static int age_intr(void *); |
96 | static int age_dma_alloc(struct age_softc *); |
97 | static void age_dma_free(struct age_softc *); |
98 | static void age_get_macaddr(struct age_softc *, uint8_t[]); |
99 | static void age_phy_reset(struct age_softc *); |
100 | |
101 | static int age_encap(struct age_softc *, struct mbuf **); |
102 | static void age_init_tx_ring(struct age_softc *); |
103 | static int age_init_rx_ring(struct age_softc *); |
104 | static void age_init_rr_ring(struct age_softc *); |
105 | static void age_init_cmb_block(struct age_softc *); |
106 | static void age_init_smb_block(struct age_softc *); |
107 | static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); |
108 | static void age_mac_config(struct age_softc *); |
109 | static void age_txintr(struct age_softc *, int); |
110 | static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); |
111 | static void age_rxintr(struct age_softc *, int); |
112 | static void age_tick(void *); |
113 | static void age_reset(struct age_softc *); |
114 | static void age_stop(struct ifnet *, int); |
115 | static void age_stats_update(struct age_softc *); |
116 | static void age_stop_txmac(struct age_softc *); |
117 | static void age_stop_rxmac(struct age_softc *); |
118 | static void age_rxvlan(struct age_softc *sc); |
119 | static void age_rxfilter(struct age_softc *); |
120 | |
121 | CFATTACH_DECL_NEW(age, sizeof(struct age_softc), |
122 | age_match, age_attach, age_detach, NULL); |
123 | |
124 | int agedebug = 0; |
125 | #define DPRINTF(x) do { if (agedebug) printf x; } while (0) |
126 | |
127 | #define ETHER_ALIGN 2 |
128 | #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) |
129 | |
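/*
 * Match the Attansic/Atheros L1 gigabit ethernet controller.
 */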
130 | static int |
131 | age_match(device_t dev, cfdata_t match, void *aux) |
132 | { |
133 | struct pci_attach_args *pa = aux; |
134 | |
135 | return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && |
136 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); |
137 | } |
138 | |
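/*
 * Attach the interface: map the registers, establish the interrupt,
 * reset the chip, allocate DMA resources and hook up the ethernet
 * interface and MII bus.
 */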
139 | static void |
140 | age_attach(device_t parent, device_t self, void *aux) |
141 | { |
142 | struct age_softc *sc = device_private(self); |
143 | struct pci_attach_args *pa = aux; |
144 | pci_intr_handle_t ih; |
145 | const char *intrstr; |
146 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
147 | pcireg_t memtype; |
148 | int error = 0; |
149 | char intrbuf[PCI_INTRSTR_LEN]; |
150 | |
151 | aprint_naive("\n" ); |
152 | aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n" ); |
153 | |
154 | sc->sc_dev = self; |
155 | sc->sc_dmat = pa->pa_dmat; |
156 | sc->sc_pct = pa->pa_pc; |
157 | sc->sc_pcitag = pa->pa_tag; |
158 | |
159 | /* |
160 | * Allocate IO memory |
161 | */ |
162 | memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); |
163 | switch (memtype) { |
164 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
165 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: |
166 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
167 | break; |
168 | default: |
169 | aprint_error_dev(self, "invalid base address register\n" ); |
170 | break; |
171 | } |
172 | |
173 | if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, |
174 | &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { |
175 | aprint_error_dev(self, "could not map mem space\n" ); |
176 | return; |
177 | } |
178 | |
179 | if (pci_intr_map(pa, &ih) != 0) { |
180 | aprint_error_dev(self, "could not map interrupt\n" ); |
181 | goto fail; |
182 | } |
183 | |
184 | /* |
185 | * Allocate IRQ |
186 | */ |
187 | intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf)); |
188 | sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, |
189 | age_intr, sc); |
190 | if (sc->sc_irq_handle == NULL) { |
191 | aprint_error_dev(self, "could not establish interrupt" ); |
192 | if (intrstr != NULL) |
193 | aprint_error(" at %s" , intrstr); |
194 | aprint_error("\n" ); |
195 | goto fail; |
196 | } |
197 | aprint_normal_dev(self, "%s\n" , intrstr); |
198 | |
199 | /* Set PHY address. */ |
200 | sc->age_phyaddr = AGE_PHY_ADDR; |
201 | |
202 | /* Reset PHY. */ |
203 | age_phy_reset(sc); |
204 | |
205 | /* Reset the ethernet controller. */ |
206 | age_reset(sc); |
207 | |
208 | /* Get PCI and chip id/revision. */ |
209 | sc->age_rev = PCI_REVISION(pa->pa_class); |
210 | sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> |
211 | MASTER_CHIP_REV_SHIFT; |
212 | |
213 | aprint_debug_dev(self, "PCI device revision : 0x%04x\n" , sc->age_rev); |
214 | aprint_debug_dev(self, "Chip id/revision : 0x%04x\n" , sc->age_chip_rev); |
215 | |
216 | if (agedebug) { |
217 | aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n" , |
218 | CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), |
219 | CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); |
220 | } |
221 | |
222 | /* Set max allowable DMA size. */ |
223 | sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; |
224 | sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; |
225 | |
	/* Allocate DMA resources. */
227 | error = age_dma_alloc(sc); |
228 | if (error) |
229 | goto fail; |
230 | |
231 | callout_init(&sc->sc_tick_ch, 0); |
232 | callout_setfunc(&sc->sc_tick_ch, age_tick, sc); |
233 | |
234 | /* Load station address. */ |
235 | age_get_macaddr(sc, sc->sc_enaddr); |
236 | |
237 | aprint_normal_dev(self, "Ethernet address %s\n" , |
238 | ether_sprintf(sc->sc_enaddr)); |
239 | |
240 | ifp->if_softc = sc; |
241 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
242 | ifp->if_init = age_init; |
243 | ifp->if_ioctl = age_ioctl; |
244 | ifp->if_start = age_start; |
245 | ifp->if_stop = age_stop; |
246 | ifp->if_watchdog = age_watchdog; |
247 | ifp->if_baudrate = IF_Gbps(1); |
248 | IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); |
249 | IFQ_SET_READY(&ifp->if_snd); |
250 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
251 | |
252 | sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; |
253 | |
254 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx | |
255 | IFCAP_CSUM_TCPv4_Rx | |
256 | IFCAP_CSUM_UDPv4_Rx; |
257 | #ifdef AGE_CHECKSUM |
258 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | |
259 | IFCAP_CSUM_TCPv4_Tx | |
260 | IFCAP_CSUM_UDPv4_Tx; |
261 | #endif |
262 | |
263 | #if NVLAN > 0 |
264 | sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; |
265 | #endif |
266 | |
267 | /* Set up MII bus. */ |
268 | sc->sc_miibus.mii_ifp = ifp; |
269 | sc->sc_miibus.mii_readreg = age_miibus_readreg; |
270 | sc->sc_miibus.mii_writereg = age_miibus_writereg; |
271 | sc->sc_miibus.mii_statchg = age_miibus_statchg; |
272 | |
273 | sc->sc_ec.ec_mii = &sc->sc_miibus; |
274 | ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, |
275 | age_mediastatus); |
276 | mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, |
277 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
278 | |
279 | if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { |
280 | aprint_error_dev(self, "no PHY found!\n" ); |
281 | ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, |
282 | 0, NULL); |
283 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); |
284 | } else |
285 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); |
286 | |
287 | if_attach(ifp); |
288 | ether_ifattach(ifp, sc->sc_enaddr); |
289 | |
290 | if (pmf_device_register1(self, NULL, age_resume, age_shutdown)) |
291 | pmf_class_network_register(self, ifp); |
292 | else |
293 | aprint_error_dev(self, "couldn't establish power handler\n" ); |
294 | |
295 | return; |
296 | |
297 | fail: |
298 | age_dma_free(sc); |
299 | if (sc->sc_irq_handle != NULL) { |
300 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); |
301 | sc->sc_irq_handle = NULL; |
302 | } |
303 | if (sc->sc_mem_size) { |
304 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); |
305 | sc->sc_mem_size = 0; |
306 | } |
307 | } |
308 | |
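/*
 * Detach the interface and release the resources acquired in
 * age_attach().
 */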
309 | static int |
310 | age_detach(device_t self, int flags) |
311 | { |
312 | struct age_softc *sc = device_private(self); |
313 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
314 | int s; |
315 | |
316 | pmf_device_deregister(self); |
317 | s = splnet(); |
318 | age_stop(ifp, 0); |
319 | splx(s); |
320 | |
321 | mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); |
322 | |
323 | /* Delete all remaining media. */ |
324 | ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); |
325 | |
326 | ether_ifdetach(ifp); |
327 | if_detach(ifp); |
328 | age_dma_free(sc); |
329 | |
330 | if (sc->sc_irq_handle != NULL) { |
331 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); |
332 | sc->sc_irq_handle = NULL; |
333 | } |
334 | if (sc->sc_mem_size) { |
335 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); |
336 | sc->sc_mem_size = 0; |
337 | } |
338 | return 0; |
339 | } |
340 | |
341 | /* |
342 | * Read a PHY register on the MII of the L1. |
343 | */ |
344 | static int |
345 | age_miibus_readreg(device_t dev, int phy, int reg) |
346 | { |
347 | struct age_softc *sc = device_private(dev); |
348 | uint32_t v; |
349 | int i; |
350 | |
351 | if (phy != sc->age_phyaddr) |
352 | return 0; |
353 | |
354 | CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | |
355 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); |
356 | for (i = AGE_PHY_TIMEOUT; i > 0; i--) { |
357 | DELAY(1); |
358 | v = CSR_READ_4(sc, AGE_MDIO); |
359 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) |
360 | break; |
361 | } |
362 | |
363 | if (i == 0) { |
364 | printf("%s: phy read timeout: phy %d, reg %d\n" , |
365 | device_xname(sc->sc_dev), phy, reg); |
366 | return 0; |
367 | } |
368 | |
369 | return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); |
370 | } |
371 | |
372 | /* |
373 | * Write a PHY register on the MII of the L1. |
374 | */ |
375 | static void |
376 | age_miibus_writereg(device_t dev, int phy, int reg, int val) |
377 | { |
378 | struct age_softc *sc = device_private(dev); |
379 | uint32_t v; |
380 | int i; |
381 | |
382 | if (phy != sc->age_phyaddr) |
383 | return; |
384 | |
385 | CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | |
386 | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | |
387 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); |
388 | |
389 | for (i = AGE_PHY_TIMEOUT; i > 0; i--) { |
390 | DELAY(1); |
391 | v = CSR_READ_4(sc, AGE_MDIO); |
392 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) |
393 | break; |
394 | } |
395 | |
396 | if (i == 0) { |
397 | printf("%s: phy write timeout: phy %d, reg %d\n" , |
398 | device_xname(sc->sc_dev), phy, reg); |
399 | } |
400 | } |
401 | |
402 | /* |
403 | * Callback from MII layer when media changes. |
404 | */ |
405 | static void |
406 | age_miibus_statchg(struct ifnet *ifp) |
407 | { |
408 | struct age_softc *sc = ifp->if_softc; |
409 | struct mii_data *mii = &sc->sc_miibus; |
410 | |
411 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
412 | return; |
413 | |
414 | sc->age_flags &= ~AGE_FLAG_LINK; |
415 | if ((mii->mii_media_status & IFM_AVALID) != 0) { |
416 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
417 | case IFM_10_T: |
418 | case IFM_100_TX: |
419 | case IFM_1000_T: |
420 | sc->age_flags |= AGE_FLAG_LINK; |
421 | break; |
422 | default: |
423 | break; |
424 | } |
425 | } |
426 | |
427 | /* Stop Rx/Tx MACs. */ |
428 | age_stop_rxmac(sc); |
429 | age_stop_txmac(sc); |
430 | |
431 | /* Program MACs with resolved speed/duplex/flow-control. */ |
432 | if ((sc->age_flags & AGE_FLAG_LINK) != 0) { |
433 | uint32_t reg; |
434 | |
435 | age_mac_config(sc); |
436 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
437 | /* Restart DMA engine and Tx/Rx MAC. */ |
438 | CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | |
439 | DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); |
440 | reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; |
441 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
442 | } |
443 | } |
444 | |
445 | /* |
446 | * Get the current interface media status. |
447 | */ |
448 | static void |
449 | age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
450 | { |
451 | struct age_softc *sc = ifp->if_softc; |
452 | struct mii_data *mii = &sc->sc_miibus; |
453 | |
454 | mii_pollstat(mii); |
455 | ifmr->ifm_status = mii->mii_media_status; |
456 | ifmr->ifm_active = mii->mii_media_active; |
457 | } |
458 | |
459 | /* |
460 | * Set hardware to newly-selected media. |
461 | */ |
462 | static int |
463 | age_mediachange(struct ifnet *ifp) |
464 | { |
465 | struct age_softc *sc = ifp->if_softc; |
466 | struct mii_data *mii = &sc->sc_miibus; |
467 | int error; |
468 | |
469 | if (mii->mii_instance != 0) { |
470 | struct mii_softc *miisc; |
471 | |
472 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
473 | mii_phy_reset(miisc); |
474 | } |
475 | error = mii_mediachg(mii); |
476 | |
477 | return error; |
478 | } |
479 | |
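/*
 * Interrupt handler.  Service the events reported through the
 * coalescing message block (CMB) and dispatch Tx/Rx processing.
 */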
480 | static int |
481 | age_intr(void *arg) |
482 | { |
483 | struct age_softc *sc = arg; |
484 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
485 | struct cmb *cmb; |
486 | uint32_t status; |
487 | |
488 | status = CSR_READ_4(sc, AGE_INTR_STATUS); |
489 | if (status == 0 || (status & AGE_INTRS) == 0) |
490 | return 0; |
491 | |
492 | cmb = sc->age_rdata.age_cmb_block; |
493 | if (cmb == NULL) { |
		/*
		 * Happens when bringing up the interface
		 * w/o having a carrier.  Ack the interrupt.
		 */
497 | CSR_WRITE_4(sc, AGE_INTR_STATUS, status); |
498 | return 0; |
499 | } |
500 | |
501 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, |
502 | sc->age_cdata.age_cmb_block_map->dm_mapsize, |
503 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
504 | status = le32toh(cmb->intr_status); |
505 | /* ACK/reenable interrupts */ |
506 | CSR_WRITE_4(sc, AGE_INTR_STATUS, status); |
507 | while ((status & AGE_INTRS) != 0) { |
508 | sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> |
509 | TPD_CONS_SHIFT; |
510 | sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> |
511 | RRD_PROD_SHIFT; |
512 | |
513 | /* Let hardware know CMB was served. */ |
514 | cmb->intr_status = 0; |
515 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, |
516 | sc->age_cdata.age_cmb_block_map->dm_mapsize, |
517 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
518 | |
519 | if (ifp->if_flags & IFF_RUNNING) { |
520 | if (status & INTR_CMB_RX) |
521 | age_rxintr(sc, sc->age_rr_prod); |
522 | |
523 | if (status & INTR_CMB_TX) |
524 | age_txintr(sc, sc->age_tpd_cons); |
525 | |
526 | if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { |
527 | if (status & INTR_DMA_RD_TO_RST) |
528 | printf("%s: DMA read error! -- " |
529 | "resetting\n" , |
530 | device_xname(sc->sc_dev)); |
531 | if (status & INTR_DMA_WR_TO_RST) |
532 | printf("%s: DMA write error! -- " |
533 | "resetting\n" , |
534 | device_xname(sc->sc_dev)); |
535 | age_init(ifp); |
536 | } |
537 | |
538 | age_start(ifp); |
539 | |
540 | if (status & INTR_SMB) |
541 | age_stats_update(sc); |
542 | } |
		/* Check whether more interrupts have come in. */
544 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, |
545 | sc->age_cdata.age_cmb_block_map->dm_mapsize, |
546 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
547 | status = le32toh(cmb->intr_status); |
548 | } |
549 | |
550 | return 1; |
551 | } |
552 | |
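/*
 * Read the station address from the PAR0/PAR1 registers, letting the
 * TWSI reload it from the EEPROM first when the VPD capability is
 * present.
 */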
553 | static void |
554 | age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) |
555 | { |
556 | uint32_t ea[2], reg; |
557 | int i, vpdc; |
558 | |
559 | reg = CSR_READ_4(sc, AGE_SPI_CTRL); |
560 | if ((reg & SPI_VPD_ENB) != 0) { |
561 | /* Get VPD stored in TWSI EEPROM. */ |
562 | reg &= ~SPI_VPD_ENB; |
563 | CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); |
564 | } |
565 | |
566 | if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, |
567 | PCI_CAP_VPD, &vpdc, NULL)) { |
568 | /* |
		 * PCI VPD capability found; let the TWSI reload the
		 * EEPROM.  This will set the Ethernet address of the
		 * controller.
571 | */ |
572 | CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | |
573 | TWSI_CTRL_SW_LD_START); |
		for (i = 100; i > 0; i--) {
575 | DELAY(1000); |
576 | reg = CSR_READ_4(sc, AGE_TWSI_CTRL); |
577 | if ((reg & TWSI_CTRL_SW_LD_START) == 0) |
578 | break; |
579 | } |
580 | if (i == 0) |
581 | printf("%s: reloading EEPROM timeout!\n" , |
582 | device_xname(sc->sc_dev)); |
583 | } else { |
584 | if (agedebug) |
585 | printf("%s: PCI VPD capability not found!\n" , |
586 | device_xname(sc->sc_dev)); |
587 | } |
588 | |
589 | ea[0] = CSR_READ_4(sc, AGE_PAR0); |
590 | ea[1] = CSR_READ_4(sc, AGE_PAR1); |
591 | |
592 | eaddr[0] = (ea[1] >> 8) & 0xFF; |
593 | eaddr[1] = (ea[1] >> 0) & 0xFF; |
594 | eaddr[2] = (ea[0] >> 24) & 0xFF; |
595 | eaddr[3] = (ea[0] >> 16) & 0xFF; |
596 | eaddr[4] = (ea[0] >> 8) & 0xFF; |
597 | eaddr[5] = (ea[0] >> 0) & 0xFF; |
598 | } |
599 | |
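/*
 * Reset the PHY and restart auto-negotiation; includes power-saving
 * workarounds taken from Linux.
 */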
600 | static void |
601 | age_phy_reset(struct age_softc *sc) |
602 | { |
603 | uint16_t reg, pn; |
604 | int i, linkup; |
605 | |
606 | /* Reset PHY. */ |
607 | CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); |
608 | DELAY(2000); |
609 | CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); |
610 | DELAY(2000); |
611 | |
612 | #define ATPHY_DBG_ADDR 0x1D |
613 | #define ATPHY_DBG_DATA 0x1E |
614 | #define ATPHY_CDTC 0x16 |
615 | #define PHY_CDTC_ENB 0x0001 |
616 | #define PHY_CDTC_POFF 8 |
617 | #define ATPHY_CDTS 0x1C |
618 | #define PHY_CDTS_STAT_OK 0x0000 |
619 | #define PHY_CDTS_STAT_SHORT 0x0100 |
620 | #define PHY_CDTS_STAT_OPEN 0x0200 |
621 | #define PHY_CDTS_STAT_INVAL 0x0300 |
622 | #define PHY_CDTS_STAT_MASK 0x0300 |
623 | |
624 | /* Check power saving mode. Magic from Linux. */ |
625 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET); |
626 | for (linkup = 0, pn = 0; pn < 4; pn++) { |
627 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC, |
628 | (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB); |
629 | for (i = 200; i > 0; i--) { |
630 | DELAY(1000); |
631 | reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, |
632 | ATPHY_CDTC); |
633 | if ((reg & PHY_CDTC_ENB) == 0) |
634 | break; |
635 | } |
636 | DELAY(1000); |
637 | reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, |
638 | ATPHY_CDTS); |
639 | if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) { |
640 | linkup++; |
641 | break; |
642 | } |
643 | } |
644 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, |
645 | BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); |
646 | if (linkup == 0) { |
647 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, |
648 | ATPHY_DBG_ADDR, 0); |
649 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, |
650 | ATPHY_DBG_DATA, 0x124E); |
651 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, |
652 | ATPHY_DBG_ADDR, 1); |
653 | reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, |
654 | ATPHY_DBG_DATA); |
655 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, |
656 | ATPHY_DBG_DATA, reg | 0x03); |
657 | /* XXX */ |
658 | DELAY(1500 * 1000); |
659 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, |
660 | ATPHY_DBG_ADDR, 0); |
661 | age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, |
662 | ATPHY_DBG_DATA, 0x024E); |
663 | } |
664 | |
665 | #undef ATPHY_DBG_ADDR |
666 | #undef ATPHY_DBG_DATA |
667 | #undef ATPHY_CDTC |
668 | #undef PHY_CDTC_ENB |
669 | #undef PHY_CDTC_POFF |
670 | #undef ATPHY_CDTS |
671 | #undef PHY_CDTS_STAT_OK |
672 | #undef PHY_CDTS_STAT_SHORT |
673 | #undef PHY_CDTS_STAT_OPEN |
674 | #undef PHY_CDTS_STAT_INVAL |
675 | #undef PHY_CDTS_STAT_MASK |
676 | } |
677 | |
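/*
 * Allocate, map and load the DMA memory for the Tx/Rx rings, the Rx
 * return ring and the CMB/SMB blocks, and create the per-buffer DMA
 * maps.
 */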
678 | static int |
679 | age_dma_alloc(struct age_softc *sc) |
680 | { |
681 | struct age_txdesc *txd; |
682 | struct age_rxdesc *rxd; |
683 | int nsegs, error, i; |
684 | |
685 | /* |
	 * Create DMA resources for the Tx ring.
687 | */ |
688 | error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, |
689 | AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); |
690 | if (error) { |
691 | sc->age_cdata.age_tx_ring_map = NULL; |
692 | return ENOBUFS; |
693 | } |
694 | |
695 | /* Allocate DMA'able memory for TX ring */ |
696 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, |
697 | ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, |
698 | &nsegs, BUS_DMA_NOWAIT); |
699 | if (error) { |
700 | printf("%s: could not allocate DMA'able memory for Tx ring, " |
701 | "error = %i\n" , device_xname(sc->sc_dev), error); |
702 | return error; |
703 | } |
704 | |
705 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, |
706 | nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, |
707 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); |
708 | if (error) |
709 | return ENOBUFS; |
710 | |
711 | memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); |
712 | |
713 | /* Load the DMA map for Tx ring. */ |
714 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, |
715 | sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT); |
716 | if (error) { |
717 | printf("%s: could not load DMA'able memory for Tx ring, " |
718 | "error = %i\n" , device_xname(sc->sc_dev), error); |
719 | bus_dmamem_free(sc->sc_dmat, |
720 | &sc->age_rdata.age_tx_ring_seg, 1); |
721 | return error; |
722 | } |
723 | |
724 | sc->age_rdata.age_tx_ring_paddr = |
725 | sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; |
726 | |
727 | /* |
	 * Create DMA resources for the Rx ring.
729 | */ |
730 | error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, |
731 | AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); |
732 | if (error) { |
733 | sc->age_cdata.age_rx_ring_map = NULL; |
734 | return ENOBUFS; |
735 | } |
736 | |
737 | /* Allocate DMA'able memory for RX ring */ |
738 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, |
739 | ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, |
740 | &nsegs, BUS_DMA_NOWAIT); |
741 | if (error) { |
742 | printf("%s: could not allocate DMA'able memory for Rx ring, " |
743 | "error = %i.\n" , device_xname(sc->sc_dev), error); |
744 | return error; |
745 | } |
746 | |
747 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, |
748 | nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, |
749 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); |
750 | if (error) |
751 | return ENOBUFS; |
752 | |
753 | memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); |
754 | |
755 | /* Load the DMA map for Rx ring. */ |
756 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, |
757 | sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT); |
758 | if (error) { |
759 | printf("%s: could not load DMA'able memory for Rx ring, " |
760 | "error = %i.\n" , device_xname(sc->sc_dev), error); |
761 | bus_dmamem_free(sc->sc_dmat, |
762 | &sc->age_rdata.age_rx_ring_seg, 1); |
763 | return error; |
764 | } |
765 | |
766 | sc->age_rdata.age_rx_ring_paddr = |
767 | sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; |
768 | |
769 | /* |
	 * Create DMA resources for the Rx return ring.
771 | */ |
772 | error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, |
773 | AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); |
774 | if (error) { |
775 | sc->age_cdata.age_rr_ring_map = NULL; |
776 | return ENOBUFS; |
777 | } |
778 | |
779 | /* Allocate DMA'able memory for RX return ring */ |
780 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, |
781 | ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, |
782 | &nsegs, BUS_DMA_NOWAIT); |
783 | if (error) { |
784 | printf("%s: could not allocate DMA'able memory for Rx " |
785 | "return ring, error = %i.\n" , |
786 | device_xname(sc->sc_dev), error); |
787 | return error; |
788 | } |
789 | |
790 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, |
791 | nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, |
792 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); |
793 | if (error) |
794 | return ENOBUFS; |
795 | |
796 | memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); |
797 | |
798 | /* Load the DMA map for Rx return ring. */ |
799 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, |
800 | sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT); |
801 | if (error) { |
802 | printf("%s: could not load DMA'able memory for Rx return ring, " |
803 | "error = %i\n" , device_xname(sc->sc_dev), error); |
804 | bus_dmamem_free(sc->sc_dmat, |
805 | &sc->age_rdata.age_rr_ring_seg, 1); |
806 | return error; |
807 | } |
808 | |
809 | sc->age_rdata.age_rr_ring_paddr = |
810 | sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; |
811 | |
812 | /* |
	 * Create DMA resources for the CMB block.
814 | */ |
815 | error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, |
816 | AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, |
817 | &sc->age_cdata.age_cmb_block_map); |
818 | if (error) { |
819 | sc->age_cdata.age_cmb_block_map = NULL; |
820 | return ENOBUFS; |
821 | } |
822 | |
823 | /* Allocate DMA'able memory for CMB block */ |
824 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, |
825 | ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, |
826 | &nsegs, BUS_DMA_NOWAIT); |
827 | if (error) { |
828 | printf("%s: could not allocate DMA'able memory for " |
829 | "CMB block, error = %i\n" , device_xname(sc->sc_dev), error); |
830 | return error; |
831 | } |
832 | |
833 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, |
834 | nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, |
835 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); |
836 | if (error) |
837 | return ENOBUFS; |
838 | |
839 | memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); |
840 | |
841 | /* Load the DMA map for CMB block. */ |
842 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, |
843 | sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, |
844 | BUS_DMA_NOWAIT); |
845 | if (error) { |
846 | printf("%s: could not load DMA'able memory for CMB block, " |
847 | "error = %i\n" , device_xname(sc->sc_dev), error); |
848 | bus_dmamem_free(sc->sc_dmat, |
849 | &sc->age_rdata.age_cmb_block_seg, 1); |
850 | return error; |
851 | } |
852 | |
853 | sc->age_rdata.age_cmb_block_paddr = |
854 | sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; |
855 | |
856 | /* |
	 * Create DMA resources for the SMB block.
858 | */ |
859 | error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, |
860 | AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, |
861 | &sc->age_cdata.age_smb_block_map); |
862 | if (error) { |
863 | sc->age_cdata.age_smb_block_map = NULL; |
864 | return ENOBUFS; |
865 | } |
866 | |
867 | /* Allocate DMA'able memory for SMB block */ |
868 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, |
869 | ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, |
870 | &nsegs, BUS_DMA_NOWAIT); |
871 | if (error) { |
872 | printf("%s: could not allocate DMA'able memory for " |
873 | "SMB block, error = %i\n" , device_xname(sc->sc_dev), error); |
874 | return error; |
875 | } |
876 | |
877 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, |
878 | nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, |
879 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); |
880 | if (error) |
881 | return ENOBUFS; |
882 | |
883 | memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); |
884 | |
885 | /* Load the DMA map for SMB block */ |
886 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, |
887 | sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, |
888 | BUS_DMA_NOWAIT); |
889 | if (error) { |
890 | printf("%s: could not load DMA'able memory for SMB block, " |
891 | "error = %i\n" , device_xname(sc->sc_dev), error); |
892 | bus_dmamem_free(sc->sc_dmat, |
893 | &sc->age_rdata.age_smb_block_seg, 1); |
894 | return error; |
895 | } |
896 | |
897 | sc->age_rdata.age_smb_block_paddr = |
898 | sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; |
899 | |
900 | /* Create DMA maps for Tx buffers. */ |
901 | for (i = 0; i < AGE_TX_RING_CNT; i++) { |
902 | txd = &sc->age_cdata.age_txdesc[i]; |
903 | txd->tx_m = NULL; |
904 | txd->tx_dmamap = NULL; |
905 | error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, |
906 | AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, |
907 | &txd->tx_dmamap); |
908 | if (error) { |
909 | txd->tx_dmamap = NULL; |
910 | printf("%s: could not create Tx dmamap, error = %i.\n" , |
911 | device_xname(sc->sc_dev), error); |
912 | return error; |
913 | } |
914 | } |
915 | |
916 | /* Create DMA maps for Rx buffers. */ |
917 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, |
918 | BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); |
919 | if (error) { |
920 | sc->age_cdata.age_rx_sparemap = NULL; |
921 | printf("%s: could not create spare Rx dmamap, error = %i.\n" , |
922 | device_xname(sc->sc_dev), error); |
923 | return error; |
924 | } |
925 | for (i = 0; i < AGE_RX_RING_CNT; i++) { |
926 | rxd = &sc->age_cdata.age_rxdesc[i]; |
927 | rxd->rx_m = NULL; |
928 | rxd->rx_dmamap = NULL; |
929 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
930 | MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); |
931 | if (error) { |
932 | rxd->rx_dmamap = NULL; |
933 | printf("%s: could not create Rx dmamap, error = %i.\n" , |
934 | device_xname(sc->sc_dev), error); |
935 | return error; |
936 | } |
937 | } |
938 | |
939 | return 0; |
940 | } |
941 | |
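/*
 * Free the DMA maps and DMA'able memory allocated by age_dma_alloc().
 */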
942 | static void |
943 | age_dma_free(struct age_softc *sc) |
944 | { |
945 | struct age_txdesc *txd; |
946 | struct age_rxdesc *rxd; |
947 | int i; |
948 | |
949 | /* Tx buffers */ |
950 | for (i = 0; i < AGE_TX_RING_CNT; i++) { |
951 | txd = &sc->age_cdata.age_txdesc[i]; |
952 | if (txd->tx_dmamap != NULL) { |
953 | bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); |
954 | txd->tx_dmamap = NULL; |
955 | } |
956 | } |
957 | /* Rx buffers */ |
958 | for (i = 0; i < AGE_RX_RING_CNT; i++) { |
959 | rxd = &sc->age_cdata.age_rxdesc[i]; |
960 | if (rxd->rx_dmamap != NULL) { |
961 | bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); |
962 | rxd->rx_dmamap = NULL; |
963 | } |
964 | } |
965 | if (sc->age_cdata.age_rx_sparemap != NULL) { |
966 | bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); |
967 | sc->age_cdata.age_rx_sparemap = NULL; |
968 | } |
969 | |
970 | /* Tx ring. */ |
971 | if (sc->age_cdata.age_tx_ring_map != NULL) |
972 | bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); |
973 | if (sc->age_cdata.age_tx_ring_map != NULL && |
974 | sc->age_rdata.age_tx_ring != NULL) |
975 | bus_dmamem_free(sc->sc_dmat, |
976 | &sc->age_rdata.age_tx_ring_seg, 1); |
977 | sc->age_rdata.age_tx_ring = NULL; |
978 | sc->age_cdata.age_tx_ring_map = NULL; |
979 | |
980 | /* Rx ring. */ |
981 | if (sc->age_cdata.age_rx_ring_map != NULL) |
982 | bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); |
983 | if (sc->age_cdata.age_rx_ring_map != NULL && |
984 | sc->age_rdata.age_rx_ring != NULL) |
985 | bus_dmamem_free(sc->sc_dmat, |
986 | &sc->age_rdata.age_rx_ring_seg, 1); |
987 | sc->age_rdata.age_rx_ring = NULL; |
988 | sc->age_cdata.age_rx_ring_map = NULL; |
989 | |
990 | /* Rx return ring. */ |
991 | if (sc->age_cdata.age_rr_ring_map != NULL) |
992 | bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); |
993 | if (sc->age_cdata.age_rr_ring_map != NULL && |
994 | sc->age_rdata.age_rr_ring != NULL) |
995 | bus_dmamem_free(sc->sc_dmat, |
996 | &sc->age_rdata.age_rr_ring_seg, 1); |
997 | sc->age_rdata.age_rr_ring = NULL; |
998 | sc->age_cdata.age_rr_ring_map = NULL; |
999 | |
1000 | /* CMB block */ |
1001 | if (sc->age_cdata.age_cmb_block_map != NULL) |
1002 | bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); |
1003 | if (sc->age_cdata.age_cmb_block_map != NULL && |
1004 | sc->age_rdata.age_cmb_block != NULL) |
1005 | bus_dmamem_free(sc->sc_dmat, |
1006 | &sc->age_rdata.age_cmb_block_seg, 1); |
1007 | sc->age_rdata.age_cmb_block = NULL; |
1008 | sc->age_cdata.age_cmb_block_map = NULL; |
1009 | |
1010 | /* SMB block */ |
1011 | if (sc->age_cdata.age_smb_block_map != NULL) |
1012 | bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); |
1013 | if (sc->age_cdata.age_smb_block_map != NULL && |
1014 | sc->age_rdata.age_smb_block != NULL) |
1015 | bus_dmamem_free(sc->sc_dmat, |
1016 | &sc->age_rdata.age_smb_block_seg, 1); |
1017 | sc->age_rdata.age_smb_block = NULL; |
1018 | sc->age_cdata.age_smb_block_map = NULL; |
1019 | } |
1020 | |
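/*
 * Start output: dequeue packets from the interface send queue and
 * encapsulate them into the Tx ring.
 */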
1021 | static void |
1022 | age_start(struct ifnet *ifp) |
1023 | { |
1024 | struct age_softc *sc = ifp->if_softc; |
1025 | struct mbuf *m_head; |
1026 | int enq; |
1027 | |
1028 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
1029 | return; |
1030 | if ((sc->age_flags & AGE_FLAG_LINK) == 0) |
1031 | return; |
1032 | if (IFQ_IS_EMPTY(&ifp->if_snd)) |
1033 | return; |
1034 | |
1035 | enq = 0; |
1036 | for (;;) { |
1037 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
1038 | if (m_head == NULL) |
1039 | break; |
1040 | |
1041 | /* |
1042 | * Pack the data into the transmit ring. If we |
1043 | * don't have room, set the OACTIVE flag and wait |
1044 | * for the NIC to drain the ring. |
1045 | */ |
1046 | if (age_encap(sc, &m_head)) { |
1047 | if (m_head == NULL) |
1048 | break; |
1049 | IF_PREPEND(&ifp->if_snd, m_head); |
1050 | ifp->if_flags |= IFF_OACTIVE; |
1051 | break; |
1052 | } |
1053 | enq = 1; |
1054 | |
1055 | /* |
1056 | * If there's a BPF listener, bounce a copy of this frame |
1057 | * to him. |
1058 | */ |
1059 | bpf_mtap(ifp, m_head); |
1060 | } |
1061 | |
1062 | if (enq) { |
1063 | /* Update mbox. */ |
1064 | AGE_COMMIT_MBOX(sc); |
1065 | /* Set a timeout in case the chip goes out to lunch. */ |
1066 | ifp->if_timer = AGE_TX_TIMEOUT; |
1067 | } |
1068 | } |
1069 | |
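/*
 * Watchdog timer; try to recover from a stuck transmitter by
 * reinitializing the interface.
 */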
1070 | static void |
1071 | age_watchdog(struct ifnet *ifp) |
1072 | { |
1073 | struct age_softc *sc = ifp->if_softc; |
1074 | |
1075 | if ((sc->age_flags & AGE_FLAG_LINK) == 0) { |
1076 | printf("%s: watchdog timeout (missed link)\n" , |
1077 | device_xname(sc->sc_dev)); |
1078 | ifp->if_oerrors++; |
1079 | age_init(ifp); |
1080 | return; |
1081 | } |
1082 | |
1083 | if (sc->age_cdata.age_tx_cnt == 0) { |
1084 | printf("%s: watchdog timeout (missed Tx interrupts) " |
1085 | "-- recovering\n" , device_xname(sc->sc_dev)); |
1086 | age_start(ifp); |
1087 | return; |
1088 | } |
1089 | |
1090 | printf("%s: watchdog timeout\n" , device_xname(sc->sc_dev)); |
1091 | ifp->if_oerrors++; |
1092 | age_init(ifp); |
1093 | age_start(ifp); |
1094 | } |
1095 | |
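/*
 * Shutdown hook: stop the interface before the system goes down.
 */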
1096 | static bool |
1097 | age_shutdown(device_t self, int howto) |
1098 | { |
1099 | struct age_softc *sc; |
1100 | struct ifnet *ifp; |
1101 | |
1102 | sc = device_private(self); |
1103 | ifp = &sc->sc_ec.ec_if; |
1104 | age_stop(ifp, 1); |
1105 | |
1106 | return true; |
1107 | } |
1109 | |
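/*
 * Handle ioctl requests; most of the work is done by ether_ioctl(),
 * and the Rx filter is reprogrammed on ENETRESET.
 */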
1110 | static int |
1111 | age_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
1112 | { |
1113 | struct age_softc *sc = ifp->if_softc; |
1114 | int s, error; |
1115 | |
1116 | s = splnet(); |
1117 | |
1118 | error = ether_ioctl(ifp, cmd, data); |
1119 | if (error == ENETRESET) { |
1120 | if (ifp->if_flags & IFF_RUNNING) |
1121 | age_rxfilter(sc); |
1122 | error = 0; |
1123 | } |
1124 | |
1125 | splx(s); |
1126 | return error; |
1127 | } |
1128 | |
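/*
 * Program the MAC with the resolved speed, duplex and flow-control
 * settings.
 */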
1129 | static void |
1130 | age_mac_config(struct age_softc *sc) |
1131 | { |
1132 | struct mii_data *mii; |
1133 | uint32_t reg; |
1134 | |
1135 | mii = &sc->sc_miibus; |
1136 | |
1137 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
1138 | reg &= ~MAC_CFG_FULL_DUPLEX; |
1139 | reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); |
1140 | reg &= ~MAC_CFG_SPEED_MASK; |
1141 | |
1142 | /* Reprogram MAC with resolved speed/duplex. */ |
1143 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
1144 | case IFM_10_T: |
1145 | case IFM_100_TX: |
1146 | reg |= MAC_CFG_SPEED_10_100; |
1147 | break; |
1148 | case IFM_1000_T: |
1149 | reg |= MAC_CFG_SPEED_1000; |
1150 | break; |
1151 | } |
1152 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { |
1153 | reg |= MAC_CFG_FULL_DUPLEX; |
1154 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) |
1155 | reg |= MAC_CFG_TX_FC; |
1156 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) |
1157 | reg |= MAC_CFG_RX_FC; |
1158 | } |
1159 | |
1160 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
1161 | } |
1162 | |
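/*
 * PMF resume handler.
 */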
1163 | static bool |
1164 | age_resume(device_t dv, const pmf_qual_t *qual) |
1165 | { |
1166 | struct age_softc *sc = device_private(dv); |
1167 | uint16_t cmd; |
1168 | |
1169 | /* |
	 * Clear the INTx emulation disable bit, which some hardware
	 * sets during a resume event.  From Linux.
1172 | */ |
1173 | cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); |
1174 | if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { |
1175 | cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; |
1176 | pci_conf_write(sc->sc_pct, sc->sc_pcitag, |
1177 | PCI_COMMAND_STATUS_REG, cmd); |
1178 | } |
1179 | |
1180 | return true; |
1181 | } |
1182 | |
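/*
 * Encapsulate an mbuf chain into the Tx ring, setting up checksum
 * offload and VLAN tag insertion as requested.
 */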
1183 | static int |
1184 | age_encap(struct age_softc *sc, struct mbuf **m_head) |
1185 | { |
1186 | struct age_txdesc *txd, *txd_last; |
1187 | struct tx_desc *desc; |
1188 | struct mbuf *m; |
1189 | bus_dmamap_t map; |
1190 | uint32_t cflags, poff, vtag; |
1191 | int error, i, nsegs, prod; |
1192 | #if NVLAN > 0 |
1193 | struct m_tag *mtag; |
1194 | #endif |
1195 | |
1196 | m = *m_head; |
1197 | cflags = vtag = 0; |
1198 | poff = 0; |
1199 | |
1200 | prod = sc->age_cdata.age_tx_prod; |
1201 | txd = &sc->age_cdata.age_txdesc[prod]; |
1202 | txd_last = txd; |
1203 | map = txd->tx_dmamap; |
1204 | |
1205 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); |
1206 | |
1207 | if (error == EFBIG) { |
1208 | error = 0; |
1209 | |
1210 | *m_head = m_pullup(*m_head, MHLEN); |
1211 | if (*m_head == NULL) { |
1212 | printf("%s: can't defrag TX mbuf\n" , |
1213 | device_xname(sc->sc_dev)); |
1214 | return ENOBUFS; |
1215 | } |
1216 | |
1217 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, |
1218 | BUS_DMA_NOWAIT); |
1219 | |
1220 | if (error != 0) { |
1221 | printf("%s: could not load defragged TX mbuf\n" , |
1222 | device_xname(sc->sc_dev)); |
1223 | m_freem(*m_head); |
1224 | *m_head = NULL; |
1225 | return error; |
1226 | } |
1227 | } else if (error) { |
1228 | printf("%s: could not load TX mbuf\n" , device_xname(sc->sc_dev)); |
1229 | return error; |
1230 | } |
1231 | |
1232 | nsegs = map->dm_nsegs; |
1233 | |
1234 | if (nsegs == 0) { |
1235 | m_freem(*m_head); |
1236 | *m_head = NULL; |
1237 | return EIO; |
1238 | } |
1239 | |
1240 | /* Check descriptor overrun. */ |
1241 | if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { |
1242 | bus_dmamap_unload(sc->sc_dmat, map); |
1243 | return ENOBUFS; |
1244 | } |
1245 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, |
1246 | BUS_DMASYNC_PREWRITE); |
1247 | |
1248 | m = *m_head; |
1249 | /* Configure Tx IP/TCP/UDP checksum offload. */ |
1250 | if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { |
1251 | cflags |= AGE_TD_CSUM; |
1252 | if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0) |
1253 | cflags |= AGE_TD_TCPCSUM; |
1254 | if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0) |
1255 | cflags |= AGE_TD_UDPCSUM; |
1256 | /* Set checksum start offset. */ |
1257 | cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); |
1258 | } |
1259 | |
1260 | #if NVLAN > 0 |
1261 | /* Configure VLAN hardware tag insertion. */ |
1262 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) { |
1263 | vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag))); |
1264 | vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); |
1265 | cflags |= AGE_TD_INSERT_VLAN_TAG; |
1266 | } |
1267 | #endif |
1268 | |
1269 | desc = NULL; |
1270 | KASSERT(nsegs > 0); |
1271 | for (i = 0; ; i++) { |
1272 | desc = &sc->age_rdata.age_tx_ring[prod]; |
1273 | desc->addr = htole64(map->dm_segs[i].ds_addr); |
1274 | desc->len = |
1275 | htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); |
1276 | desc->flags = htole32(cflags); |
1277 | sc->age_cdata.age_tx_cnt++; |
1278 | if (i == (nsegs - 1)) |
1279 | break; |
1280 | |
1281 | /* sync this descriptor and go to the next one */ |
1282 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, |
1283 | prod * sizeof(struct tx_desc), sizeof(struct tx_desc), |
1284 | BUS_DMASYNC_PREWRITE); |
1285 | AGE_DESC_INC(prod, AGE_TX_RING_CNT); |
1286 | } |
1287 | |
1288 | /* Set EOP on the last descriptor and sync it. */ |
1289 | desc->flags |= htole32(AGE_TD_EOP); |
1290 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, |
1291 | prod * sizeof(struct tx_desc), sizeof(struct tx_desc), |
1292 | BUS_DMASYNC_PREWRITE); |
1293 | |
1294 | if (nsegs > 1) { |
1295 | /* Swap dmamap of the first and the last. */ |
1296 | txd = &sc->age_cdata.age_txdesc[prod]; |
1297 | map = txd_last->tx_dmamap; |
1298 | txd_last->tx_dmamap = txd->tx_dmamap; |
1299 | txd->tx_dmamap = map; |
1300 | txd->tx_m = m; |
1301 | KASSERT(txd_last->tx_m == NULL); |
1302 | } else { |
1303 | KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]); |
1304 | txd_last->tx_m = m; |
1305 | } |
1306 | |
1307 | /* Update producer index. */ |
1308 | AGE_DESC_INC(prod, AGE_TX_RING_CNT); |
1309 | sc->age_cdata.age_tx_prod = prod; |
1310 | |
1311 | return 0; |
1312 | } |
1313 | |
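/*
 * Tx completion interrupt; reclaim transmitted mbufs up to the
 * reported TPD consumer index.
 */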
1314 | static void |
1315 | age_txintr(struct age_softc *sc, int tpd_cons) |
1316 | { |
1317 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1318 | struct age_txdesc *txd; |
1319 | int cons, prog; |
1320 | |
1322 | if (sc->age_cdata.age_tx_cnt <= 0) { |
1323 | if (ifp->if_timer != 0) |
1324 | printf("timer running without packets\n" ); |
1325 | if (sc->age_cdata.age_tx_cnt) |
1326 | printf("age_tx_cnt corrupted\n" ); |
1327 | } |
1328 | |
1329 | /* |
1330 | * Go through our Tx list and free mbufs for those |
1331 | * frames which have been transmitted. |
1332 | */ |
1333 | cons = sc->age_cdata.age_tx_cons; |
1334 | for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { |
1335 | if (sc->age_cdata.age_tx_cnt <= 0) |
1336 | break; |
1337 | prog++; |
1338 | ifp->if_flags &= ~IFF_OACTIVE; |
1339 | sc->age_cdata.age_tx_cnt--; |
1340 | txd = &sc->age_cdata.age_txdesc[cons]; |
1341 | /* |
		 * Clear the Tx descriptors; it's not required but
		 * helps debugging in case of Tx issues.
1344 | */ |
1345 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, |
1346 | cons * sizeof(struct tx_desc), sizeof(struct tx_desc), |
1347 | BUS_DMASYNC_POSTWRITE); |
1348 | txd->tx_desc->addr = 0; |
1349 | txd->tx_desc->len = 0; |
1350 | txd->tx_desc->flags = 0; |
1351 | |
1352 | if (txd->tx_m == NULL) |
1353 | continue; |
1354 | /* Reclaim transmitted mbufs. */ |
1355 | bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); |
1356 | m_freem(txd->tx_m); |
1357 | txd->tx_m = NULL; |
1358 | } |
1359 | |
1360 | if (prog > 0) { |
1361 | sc->age_cdata.age_tx_cons = cons; |
1362 | |
1363 | /* |
1364 | * Unarm watchdog timer only when there are no pending |
1365 | * Tx descriptors in queue. |
1366 | */ |
1367 | if (sc->age_cdata.age_tx_cnt == 0) |
1368 | ifp->if_timer = 0; |
1369 | } |
1370 | } |
1371 | |
1372 | /* Receive a frame. */ |
1373 | static void |
1374 | age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) |
1375 | { |
1376 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1377 | struct age_rxdesc *rxd; |
1378 | struct rx_desc *desc; |
1379 | struct mbuf *mp, *m; |
1380 | uint32_t status, index; |
1381 | int count, nsegs, pktlen; |
1382 | int rx_cons; |
1383 | |
1384 | status = le32toh(rxrd->flags); |
1385 | index = le32toh(rxrd->index); |
1386 | rx_cons = AGE_RX_CONS(index); |
1387 | nsegs = AGE_RX_NSEGS(index); |
1388 | |
1389 | sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); |
1390 | if ((status & AGE_RRD_ERROR) != 0 && |
1391 | (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | |
1392 | AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { |
1393 | /* |
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 * o IP/TCP/UDP checksum is bad.
		 * o frame length and protocol-specific length
		 *   do not match.
1401 | */ |
1402 | sc->age_cdata.age_rx_cons += nsegs; |
1403 | sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; |
1404 | return; |
1405 | } |
1406 | |
1407 | pktlen = 0; |
1408 | for (count = 0; count < nsegs; count++, |
1409 | AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { |
1410 | rxd = &sc->age_cdata.age_rxdesc[rx_cons]; |
1411 | mp = rxd->rx_m; |
1412 | desc = rxd->rx_desc; |
1413 | /* Add a new receive buffer to the ring. */ |
1414 | if (age_newbuf(sc, rxd, 0) != 0) { |
1415 | ifp->if_iqdrops++; |
1416 | /* Reuse Rx buffers. */ |
1417 | if (sc->age_cdata.age_rxhead != NULL) { |
1418 | m_freem(sc->age_cdata.age_rxhead); |
1419 | AGE_RXCHAIN_RESET(sc); |
1420 | } |
1421 | break; |
1422 | } |
1423 | |
1424 | /* The length of the first mbuf is computed last. */ |
1425 | if (count != 0) { |
1426 | mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); |
1427 | pktlen += mp->m_len; |
1428 | } |
1429 | |
1430 | /* Chain received mbufs. */ |
1431 | if (sc->age_cdata.age_rxhead == NULL) { |
1432 | sc->age_cdata.age_rxhead = mp; |
1433 | sc->age_cdata.age_rxtail = mp; |
1434 | } else { |
1435 | mp->m_flags &= ~M_PKTHDR; |
1436 | sc->age_cdata.age_rxprev_tail = |
1437 | sc->age_cdata.age_rxtail; |
1438 | sc->age_cdata.age_rxtail->m_next = mp; |
1439 | sc->age_cdata.age_rxtail = mp; |
1440 | } |
1441 | |
1442 | if (count == nsegs - 1) { |
1443 | /* |
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip the CRC bytes.
1446 | */ |
1447 | sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; |
1448 | if (nsegs > 1) { |
1449 | /* Remove the CRC bytes in chained mbufs. */ |
1450 | pktlen -= ETHER_CRC_LEN; |
1451 | if (mp->m_len <= ETHER_CRC_LEN) { |
1452 | sc->age_cdata.age_rxtail = |
1453 | sc->age_cdata.age_rxprev_tail; |
1454 | sc->age_cdata.age_rxtail->m_len -= |
1455 | (ETHER_CRC_LEN - mp->m_len); |
1456 | sc->age_cdata.age_rxtail->m_next = NULL; |
1457 | m_freem(mp); |
1458 | } else { |
1459 | mp->m_len -= ETHER_CRC_LEN; |
1460 | } |
1461 | } |
1462 | |
1463 | m = sc->age_cdata.age_rxhead; |
1464 | m->m_flags |= M_PKTHDR; |
1465 | m_set_rcvif(m, ifp); |
1466 | m->m_pkthdr.len = sc->age_cdata.age_rxlen; |
1467 | /* Set the first mbuf length. */ |
1468 | m->m_len = sc->age_cdata.age_rxlen - pktlen; |
1469 | |
1470 | /* |
1471 | * Set checksum information. |
			 * It seems that the L1 controller can compute a
			 * partial checksum.  The partial checksum value
			 * could be used to accelerate checksum computation
			 * for fragmented TCP/UDP packets; the upper network
			 * stack already takes advantage of the partial
			 * checksum value in the IP reassembly stage.  But
			 * I'm not sure of the correctness of the partial
			 * hardware checksum assistance due to the lack of a
			 * data sheet.  If it is proven to work on the L1
			 * I'll enable it.
1482 | if (status & AGE_RRD_IPV4) { |
1483 | if (status & AGE_RRD_IPCSUM_NOK) |
1484 | m->m_pkthdr.csum_flags |= |
1485 | M_CSUM_IPv4_BAD; |
1486 | if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && |
1487 | (status & AGE_RRD_TCP_UDPCSUM_NOK)) { |
1488 | m->m_pkthdr.csum_flags |= |
1489 | M_CSUM_TCP_UDP_BAD; |
1490 | } |
1491 | /* |
				 * Don't mark the checksum bad for TCP/UDP
				 * frames, as fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
1495 | */ |
1496 | } |
1497 | #if NVLAN > 0 |
1498 | /* Check for VLAN tagged frames. */ |
1499 | if (status & AGE_RRD_VLAN) { |
1500 | uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); |
1501 | VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag), |
1502 | continue); |
1503 | } |
1504 | #endif |
1505 | |
1506 | bpf_mtap(ifp, m); |
1507 | /* Pass it on. */ |
1508 | if_percpuq_enqueue(ifp->if_percpuq, m); |
1509 | |
1510 | /* Reset mbuf chains. */ |
1511 | AGE_RXCHAIN_RESET(sc); |
1512 | } |
1513 | } |
1514 | |
1515 | if (count != nsegs) { |
1516 | sc->age_cdata.age_rx_cons += nsegs; |
1517 | sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; |
1518 | } else |
1519 | sc->age_cdata.age_rx_cons = rx_cons; |
1520 | } |
1521 | |
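/*
 * Rx interrupt; process frames from the Rx return ring up to the
 * reported producer index.
 */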
1522 | static void |
1523 | age_rxintr(struct age_softc *sc, int rr_prod) |
1524 | { |
1525 | struct rx_rdesc *rxrd; |
1526 | int rr_cons, nsegs, pktlen, prog; |
1527 | |
1528 | rr_cons = sc->age_cdata.age_rr_cons; |
1529 | if (rr_cons == rr_prod) |
1530 | return; |
1531 | |
1532 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, |
1533 | sc->age_cdata.age_rr_ring_map->dm_mapsize, |
1534 | BUS_DMASYNC_POSTREAD); |
1535 | |
1536 | for (prog = 0; rr_cons != rr_prod; prog++) { |
1537 | rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; |
1538 | nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); |
1539 | if (nsegs == 0) |
1540 | break; |
1541 | /* |
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update the Rx return
		 * descriptors.  I'm not sure whether this check is
		 * really needed.
1546 | */ |
1547 | pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); |
1548 | if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / |
1549 | (MCLBYTES - ETHER_ALIGN))) |
1550 | break; |
1551 | |
1552 | /* Received a frame. */ |
1553 | age_rxeof(sc, rxrd); |
1554 | |
1555 | /* Clear return ring. */ |
1556 | rxrd->index = 0; |
1557 | AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); |
1558 | } |
1559 | |
1560 | if (prog > 0) { |
1561 | /* Update the consumer index. */ |
1562 | sc->age_cdata.age_rr_cons = rr_cons; |
1563 | |
1564 | /* Sync descriptors. */ |
1565 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, |
1566 | sc->age_cdata.age_rr_ring_map->dm_mapsize, |
1567 | BUS_DMASYNC_PREWRITE); |
1568 | |
1569 | /* Notify hardware availability of new Rx buffers. */ |
1570 | AGE_COMMIT_MBOX(sc); |
1571 | } |
1572 | } |
1573 | |
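/*
 * One second timer; drive the MII tick and reschedule ourselves.
 */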
1574 | static void |
1575 | age_tick(void *xsc) |
1576 | { |
1577 | struct age_softc *sc = xsc; |
1578 | struct mii_data *mii = &sc->sc_miibus; |
1579 | int s; |
1580 | |
1581 | s = splnet(); |
1582 | mii_tick(mii); |
1583 | splx(s); |
1584 | |
1585 | callout_schedule(&sc->sc_tick_ch, hz); |
1586 | } |
1587 | |
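/*
 * Issue a master reset and wait for the controller to go idle.
 */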
1588 | static void |
1589 | age_reset(struct age_softc *sc) |
1590 | { |
1591 | uint32_t reg; |
1592 | int i; |
1593 | |
1594 | CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); |
1595 | CSR_READ_4(sc, AGE_MASTER_CFG); |
1596 | DELAY(1000); |
1597 | for (i = AGE_RESET_TIMEOUT; i > 0; i--) { |
1598 | if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) |
1599 | break; |
1600 | DELAY(10); |
1601 | } |
1602 | |
1603 | if (i == 0) |
1604 | printf("%s: reset timeout(0x%08x)!\n" , device_xname(sc->sc_dev), |
1605 | reg); |
1606 | |
1607 | /* Initialize PCIe module. From Linux. */ |
1608 | CSR_WRITE_4(sc, 0x12FC, 0x6500); |
1609 | CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); |
1610 | } |
1611 | |
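/*
 * Stop the interface, reset the chip and program it back into a
 * running configuration.
 */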
1612 | static int |
1613 | age_init(struct ifnet *ifp) |
1614 | { |
1615 | struct age_softc *sc = ifp->if_softc; |
1616 | struct mii_data *mii; |
1617 | uint8_t eaddr[ETHER_ADDR_LEN]; |
1618 | bus_addr_t paddr; |
1619 | uint32_t reg, fsize; |
1620 | uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; |
1621 | int error; |
1622 | |
1623 | /* |
1624 | * Cancel any pending I/O. |
1625 | */ |
1626 | age_stop(ifp, 0); |
1627 | |
1628 | /* |
1629 | * Reset the chip to a known state. |
1630 | */ |
1631 | age_reset(sc); |
1632 | |
1633 | /* Initialize descriptors. */ |
1634 | error = age_init_rx_ring(sc); |
1635 | if (error != 0) { |
1636 | printf("%s: no memory for Rx buffers.\n" , device_xname(sc->sc_dev)); |
1637 | age_stop(ifp, 0); |
1638 | return error; |
1639 | } |
1640 | age_init_rr_ring(sc); |
1641 | age_init_tx_ring(sc); |
1642 | age_init_cmb_block(sc); |
1643 | age_init_smb_block(sc); |
1644 | |
1645 | /* Reprogram the station address. */ |
1646 | memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr)); |
1647 | CSR_WRITE_4(sc, AGE_PAR0, |
1648 | eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); |
1649 | CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); |
1650 | |
1651 | /* Set descriptor base addresses. */ |
1652 | paddr = sc->age_rdata.age_tx_ring_paddr; |
1653 | CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); |
1654 | paddr = sc->age_rdata.age_rx_ring_paddr; |
1655 | CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); |
1656 | paddr = sc->age_rdata.age_rr_ring_paddr; |
1657 | CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); |
1658 | paddr = sc->age_rdata.age_tx_ring_paddr; |
1659 | CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); |
1660 | paddr = sc->age_rdata.age_cmb_block_paddr; |
1661 | CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); |
1662 | paddr = sc->age_rdata.age_smb_block_paddr; |
1663 | CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); |
1664 | |
1665 | /* Set Rx/Rx return descriptor counter. */ |
1666 | CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT, |
1667 | ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & |
1668 | DESC_RRD_CNT_MASK) | |
1669 | ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK)); |
1670 | |
1671 | /* Set Tx descriptor counter. */ |
1672 | CSR_WRITE_4(sc, AGE_DESC_TPD_CNT, |
1673 | (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK); |
1674 | |
1675 | /* Tell hardware that we're ready to load descriptors. */ |
1676 | CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD); |
1677 | |
1678 | /* |
1679 | * Initialize mailbox register. |
1680 | * Updated producer/consumer index information is exchanged |
	 * through this mailbox register.  However the Tx producer and
	 * the Rx return consumer/Rx producer indexes are all shared,
	 * so it's hard to separate the Tx and Rx code paths without
	 * locking.  If the L1 hardware had separate mailbox registers
	 * for Tx and Rx consumer/producer management we could have
	 * independent Tx/Rx handlers, and the Rx handler could then
	 * run without any locking.
1688 | */ |
1689 | AGE_COMMIT_MBOX(sc); |
1690 | |
1691 | /* Configure IPG/IFG parameters. */ |
1692 | CSR_WRITE_4(sc, AGE_IPG_IFG_CFG, |
1693 | ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) | |
1694 | ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | |
1695 | ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | |
1696 | ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK)); |
1697 | |
1698 | /* Set parameters for half-duplex media. */ |
1699 | CSR_WRITE_4(sc, AGE_HDPX_CFG, |
1700 | ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & |
1701 | HDPX_CFG_LCOL_MASK) | |
1702 | ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & |
1703 | HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | |
1704 | ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & |
1705 | HDPX_CFG_ABEBT_MASK) | |
1706 | ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & |
1707 | HDPX_CFG_JAMIPG_MASK)); |
1708 | |
1709 | /* Configure interrupt moderation timer. */ |
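	/*
	 * AGE_USECS() (if_agereg.h) converts a microsecond value into the
	 * units the hardware timers expect; a converted value of 0 below
	 * disables the interrupt moderation timer entirely.
	 */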
1710 | sc->age_int_mod = AGE_IM_TIMER_DEFAULT; |
1711 | CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod)); |
1712 | reg = CSR_READ_4(sc, AGE_MASTER_CFG); |
1713 | reg &= ~MASTER_MTIMER_ENB; |
1714 | if (AGE_USECS(sc->age_int_mod) == 0) |
1715 | reg &= ~MASTER_ITIMER_ENB; |
1716 | else |
1717 | reg |= MASTER_ITIMER_ENB; |
1718 | CSR_WRITE_4(sc, AGE_MASTER_CFG, reg); |
1719 | if (agedebug) |
1720 | printf("%s: interrupt moderation is %d us.\n" , |
1721 | device_xname(sc->sc_dev), sc->age_int_mod); |
1722 | CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000)); |
1723 | |
	/* Set maximum frame size, but don't let MTU be less than ETHERMTU. */
1725 | if (ifp->if_mtu < ETHERMTU) |
1726 | sc->age_max_frame_size = ETHERMTU; |
1727 | else |
1728 | sc->age_max_frame_size = ifp->if_mtu; |
1729 | sc->age_max_frame_size += ETHER_HDR_LEN + |
1730 | sizeof(struct ether_vlan_header) + ETHER_CRC_LEN; |
1731 | CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size); |
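	/*
	 * For example, with the default 1500-byte MTU the value written
	 * above is 1500 + ETHER_HDR_LEN (14) +
	 * sizeof(struct ether_vlan_header) (18) + ETHER_CRC_LEN (4) = 1536.
	 */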
1732 | |
1733 | /* Configure jumbo frame. */ |
1734 | fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t)); |
1735 | CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG, |
1736 | (((fsize / sizeof(uint64_t)) << |
1737 | RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) | |
1738 | ((RXQ_JUMBO_CFG_LKAH_DEFAULT << |
1739 | RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) | |
1740 | ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) & |
1741 | RXQ_JUMBO_CFG_RRD_TIMER_MASK)); |
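	/*
	 * Continuing the 1536-byte example above, fsize is already a
	 * multiple of 8, so the jumbo size threshold is programmed in
	 * 8-byte units as 1536 / sizeof(uint64_t) = 192.
	 */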
1742 | |
1743 | /* Configure flow-control parameters. From Linux. */ |
1744 | if ((sc->age_flags & AGE_FLAG_PCIE) != 0) { |
1745 | /* |
		 * Magic workaround for the old L1.
		 * It is unknown which hardware revision requires this.
1748 | */ |
1749 | CSR_WRITE_4(sc, 0x12FC, 0x6500); |
1750 | /* |
1751 | * Another magic workaround for flow-control mode |
1752 | * change. From Linux. |
1753 | */ |
1754 | CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); |
1755 | } |
1756 | /* |
1757 | * TODO |
	 * Should understand the relationship between the pause parameters,
	 * the FIFO size, and the number of Rx and Rx return descriptors.
1760 | * |
1761 | * Magic parameters came from Linux. |
1762 | */ |
1763 | switch (sc->age_chip_rev) { |
1764 | case 0x8001: |
1765 | case 0x9001: |
1766 | case 0x9002: |
1767 | case 0x9003: |
1768 | rxf_hi = AGE_RX_RING_CNT / 16; |
1769 | rxf_lo = (AGE_RX_RING_CNT * 7) / 8; |
1770 | rrd_hi = (AGE_RR_RING_CNT * 7) / 8; |
1771 | rrd_lo = AGE_RR_RING_CNT / 16; |
1772 | break; |
1773 | default: |
1774 | reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN); |
1775 | rxf_lo = reg / 16; |
1776 | if (rxf_lo < 192) |
1777 | rxf_lo = 192; |
1778 | rxf_hi = (reg * 7) / 8; |
1779 | if (rxf_hi < rxf_lo) |
1780 | rxf_hi = rxf_lo + 16; |
1781 | reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN); |
1782 | rrd_lo = reg / 8; |
1783 | rrd_hi = (reg * 7) / 8; |
1784 | if (rrd_lo < 2) |
1785 | rrd_lo = 2; |
1786 | if (rrd_hi < rrd_lo) |
1787 | rrd_hi = rrd_lo + 3; |
1788 | break; |
1789 | } |
1790 | CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH, |
1791 | ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) & |
1792 | RXQ_FIFO_PAUSE_THRESH_LO_MASK) | |
1793 | ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) & |
1794 | RXQ_FIFO_PAUSE_THRESH_HI_MASK)); |
1795 | CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH, |
1796 | ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) & |
1797 | RXQ_RRD_PAUSE_THRESH_LO_MASK) | |
1798 | ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) & |
1799 | RXQ_RRD_PAUSE_THRESH_HI_MASK)); |
1800 | |
1801 | /* Configure RxQ. */ |
1802 | CSR_WRITE_4(sc, AGE_RXQ_CFG, |
1803 | ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & |
1804 | RXQ_CFG_RD_BURST_MASK) | |
1805 | ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT << |
1806 | RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) | |
1807 | ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT << |
1808 | RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) | |
1809 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); |
1810 | |
1811 | /* Configure TxQ. */ |
1812 | CSR_WRITE_4(sc, AGE_TXQ_CFG, |
1813 | ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & |
1814 | TXQ_CFG_TPD_BURST_MASK) | |
1815 | ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) & |
1816 | TXQ_CFG_TX_FIFO_BURST_MASK) | |
1817 | ((TXQ_CFG_TPD_FETCH_DEFAULT << |
1818 | TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) | |
1819 | TXQ_CFG_ENB); |
1820 | |
1821 | /* Configure DMA parameters. */ |
1822 | CSR_WRITE_4(sc, AGE_DMA_CFG, |
1823 | DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 | |
1824 | sc->age_dma_rd_burst | DMA_CFG_RD_ENB | |
1825 | sc->age_dma_wr_burst | DMA_CFG_WR_ENB); |
1826 | |
1827 | /* Configure CMB DMA write threshold. */ |
1828 | CSR_WRITE_4(sc, AGE_CMB_WR_THRESH, |
1829 | ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) & |
1830 | CMB_WR_THRESH_RRD_MASK) | |
1831 | ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) & |
1832 | CMB_WR_THRESH_TPD_MASK)); |
1833 | |
1834 | /* Set CMB/SMB timer and enable them. */ |
1835 | CSR_WRITE_4(sc, AGE_CMB_WR_TIMER, |
1836 | ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) | |
1837 | ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK)); |
1838 | |
	/* Request SMB updates every second. */
1840 | CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000)); |
1841 | CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB); |
1842 | |
1843 | /* |
	 * Disable all WOL bits, as WOL can interfere with normal Rx
1845 | * operation. |
1846 | */ |
1847 | CSR_WRITE_4(sc, AGE_WOL_CFG, 0); |
1848 | |
1849 | /* |
1850 | * Configure Tx/Rx MACs. |
1851 | * - Auto-padding for short frames. |
1852 | * - Enable CRC generation. |
	 * Start with full-duplex/1000Mbps media. The MAC is actually
	 * reconfigured once the link has been established.
1855 | */ |
1856 | CSR_WRITE_4(sc, AGE_MAC_CFG, |
1857 | MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | |
1858 | MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | |
1859 | ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & |
1860 | MAC_CFG_PREAMBLE_MASK)); |
1861 | |
1862 | /* Set up the receive filter. */ |
1863 | age_rxfilter(sc); |
1864 | age_rxvlan(sc); |
1865 | |
1866 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
1867 | reg |= MAC_CFG_RXCSUM_ENB; |
1868 | |
	/* Ack all pending interrupts and enable interrupts. */
1870 | CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); |
1871 | CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); |
1872 | |
1873 | /* Finally enable Tx/Rx MAC. */ |
1874 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); |
1875 | |
1876 | sc->age_flags &= ~AGE_FLAG_LINK; |
1877 | |
1878 | /* Switch to the current media. */ |
1879 | mii = &sc->sc_miibus; |
1880 | mii_mediachg(mii); |
1881 | |
1882 | callout_schedule(&sc->sc_tick_ch, hz); |
1883 | |
1884 | ifp->if_flags |= IFF_RUNNING; |
1885 | ifp->if_flags &= ~IFF_OACTIVE; |
1886 | |
1887 | return 0; |
1888 | } |
1889 | |
1890 | static void |
1891 | age_stop(struct ifnet *ifp, int disable) |
1892 | { |
1893 | struct age_softc *sc = ifp->if_softc; |
1894 | struct age_txdesc *txd; |
1895 | struct age_rxdesc *rxd; |
1896 | uint32_t reg; |
1897 | int i; |
1898 | |
1899 | callout_stop(&sc->sc_tick_ch); |
1900 | |
1901 | /* |
1902 | * Mark the interface down and cancel the watchdog timer. |
1903 | */ |
1904 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1905 | ifp->if_timer = 0; |
1906 | |
1907 | sc->age_flags &= ~AGE_FLAG_LINK; |
1908 | |
1909 | mii_down(&sc->sc_miibus); |
1910 | |
1911 | /* |
1912 | * Disable interrupts. |
1913 | */ |
1914 | CSR_WRITE_4(sc, AGE_INTR_MASK, 0); |
1915 | CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); |
1916 | |
1917 | /* Stop CMB/SMB updates. */ |
1918 | CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); |
1919 | |
1920 | /* Stop Rx/Tx MAC. */ |
1921 | age_stop_rxmac(sc); |
1922 | age_stop_txmac(sc); |
1923 | |
1924 | /* Stop DMA. */ |
1925 | CSR_WRITE_4(sc, AGE_DMA_CFG, |
1926 | CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); |
1927 | |
1928 | /* Stop TxQ/RxQ. */ |
1929 | CSR_WRITE_4(sc, AGE_TXQ_CFG, |
1930 | CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); |
1931 | CSR_WRITE_4(sc, AGE_RXQ_CFG, |
1932 | CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); |
1933 | for (i = AGE_RESET_TIMEOUT; i > 0; i--) { |
1934 | if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) |
1935 | break; |
1936 | DELAY(10); |
1937 | } |
1938 | if (i == 0) |
1939 | printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n" , |
1940 | device_xname(sc->sc_dev), reg); |
1941 | |
1942 | /* Reclaim Rx buffers that have been processed. */ |
1943 | if (sc->age_cdata.age_rxhead != NULL) |
1944 | m_freem(sc->age_cdata.age_rxhead); |
1945 | AGE_RXCHAIN_RESET(sc); |
1946 | |
1947 | /* |
1948 | * Free RX and TX mbufs still in the queues. |
1949 | */ |
1950 | for (i = 0; i < AGE_RX_RING_CNT; i++) { |
1951 | rxd = &sc->age_cdata.age_rxdesc[i]; |
1952 | if (rxd->rx_m != NULL) { |
1953 | bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); |
1954 | m_freem(rxd->rx_m); |
1955 | rxd->rx_m = NULL; |
1956 | } |
1957 | } |
1958 | for (i = 0; i < AGE_TX_RING_CNT; i++) { |
1959 | txd = &sc->age_cdata.age_txdesc[i]; |
1960 | if (txd->tx_m != NULL) { |
1961 | bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); |
1962 | m_freem(txd->tx_m); |
1963 | txd->tx_m = NULL; |
1964 | } |
1965 | } |
1966 | } |
1967 | |
1968 | static void |
1969 | age_stats_update(struct age_softc *sc) |
1970 | { |
1971 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1972 | struct age_stats *stat; |
1973 | struct smb *smb; |
1974 | |
1975 | stat = &sc->age_stat; |
1976 | |
1977 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, |
1978 | sc->age_cdata.age_smb_block_map->dm_mapsize, |
1979 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1980 | |
1981 | smb = sc->age_rdata.age_smb_block; |
1982 | if (smb->updated == 0) |
1983 | return; |
1984 | |
1985 | /* Rx stats. */ |
1986 | stat->rx_frames += smb->rx_frames; |
1987 | stat->rx_bcast_frames += smb->rx_bcast_frames; |
1988 | stat->rx_mcast_frames += smb->rx_mcast_frames; |
1989 | stat->rx_pause_frames += smb->rx_pause_frames; |
1990 | stat->rx_control_frames += smb->rx_control_frames; |
1991 | stat->rx_crcerrs += smb->rx_crcerrs; |
1992 | stat->rx_lenerrs += smb->rx_lenerrs; |
1993 | stat->rx_bytes += smb->rx_bytes; |
1994 | stat->rx_runts += smb->rx_runts; |
1995 | stat->rx_fragments += smb->rx_fragments; |
1996 | stat->rx_pkts_64 += smb->rx_pkts_64; |
1997 | stat->rx_pkts_65_127 += smb->rx_pkts_65_127; |
1998 | stat->rx_pkts_128_255 += smb->rx_pkts_128_255; |
1999 | stat->rx_pkts_256_511 += smb->rx_pkts_256_511; |
2000 | stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; |
2001 | stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; |
2002 | stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; |
2003 | stat->rx_pkts_truncated += smb->rx_pkts_truncated; |
2004 | stat->rx_fifo_oflows += smb->rx_fifo_oflows; |
2005 | stat->rx_desc_oflows += smb->rx_desc_oflows; |
2006 | stat->rx_alignerrs += smb->rx_alignerrs; |
2007 | stat->rx_bcast_bytes += smb->rx_bcast_bytes; |
2008 | stat->rx_mcast_bytes += smb->rx_mcast_bytes; |
2009 | stat->rx_pkts_filtered += smb->rx_pkts_filtered; |
2010 | |
2011 | /* Tx stats. */ |
2012 | stat->tx_frames += smb->tx_frames; |
2013 | stat->tx_bcast_frames += smb->tx_bcast_frames; |
2014 | stat->tx_mcast_frames += smb->tx_mcast_frames; |
2015 | stat->tx_pause_frames += smb->tx_pause_frames; |
2016 | stat->tx_excess_defer += smb->tx_excess_defer; |
2017 | stat->tx_control_frames += smb->tx_control_frames; |
2018 | stat->tx_deferred += smb->tx_deferred; |
2019 | stat->tx_bytes += smb->tx_bytes; |
2020 | stat->tx_pkts_64 += smb->tx_pkts_64; |
2021 | stat->tx_pkts_65_127 += smb->tx_pkts_65_127; |
2022 | stat->tx_pkts_128_255 += smb->tx_pkts_128_255; |
2023 | stat->tx_pkts_256_511 += smb->tx_pkts_256_511; |
2024 | stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; |
2025 | stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; |
2026 | stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; |
2027 | stat->tx_single_colls += smb->tx_single_colls; |
2028 | stat->tx_multi_colls += smb->tx_multi_colls; |
2029 | stat->tx_late_colls += smb->tx_late_colls; |
2030 | stat->tx_excess_colls += smb->tx_excess_colls; |
2031 | stat->tx_underrun += smb->tx_underrun; |
2032 | stat->tx_desc_underrun += smb->tx_desc_underrun; |
2033 | stat->tx_lenerrs += smb->tx_lenerrs; |
2034 | stat->tx_pkts_truncated += smb->tx_pkts_truncated; |
2035 | stat->tx_bcast_bytes += smb->tx_bcast_bytes; |
2036 | stat->tx_mcast_bytes += smb->tx_mcast_bytes; |
2037 | |
2038 | /* Update counters in ifnet. */ |
2039 | ifp->if_opackets += smb->tx_frames; |
2040 | |
2041 | ifp->if_collisions += smb->tx_single_colls + |
2042 | smb->tx_multi_colls + smb->tx_late_colls + |
2043 | smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; |
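	/*
	 * Each excess-collision event presumably represents
	 * HDPX_CFG_RETRY_DEFAULT failed attempts, hence the multiplication
	 * above.
	 */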
2044 | |
2045 | ifp->if_oerrors += smb->tx_excess_colls + |
2046 | smb->tx_late_colls + smb->tx_underrun + |
2047 | smb->tx_pkts_truncated; |
2048 | |
2049 | ifp->if_ipackets += smb->rx_frames; |
2050 | |
2051 | ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + |
2052 | smb->rx_runts + smb->rx_pkts_truncated + |
2053 | smb->rx_fifo_oflows + smb->rx_desc_oflows + |
2054 | smb->rx_alignerrs; |
2055 | |
	/* Update done; clear the updated flag in the stats block. */
2057 | smb->updated = 0; |
2058 | |
2059 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, |
2060 | sc->age_cdata.age_smb_block_map->dm_mapsize, |
2061 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2062 | } |
2063 | |
2064 | static void |
2065 | age_stop_txmac(struct age_softc *sc) |
2066 | { |
2067 | uint32_t reg; |
2068 | int i; |
2069 | |
2070 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
2071 | if ((reg & MAC_CFG_TX_ENB) != 0) { |
2072 | reg &= ~MAC_CFG_TX_ENB; |
2073 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
2074 | } |
2075 | /* Stop Tx DMA engine. */ |
2076 | reg = CSR_READ_4(sc, AGE_DMA_CFG); |
2077 | if ((reg & DMA_CFG_RD_ENB) != 0) { |
2078 | reg &= ~DMA_CFG_RD_ENB; |
2079 | CSR_WRITE_4(sc, AGE_DMA_CFG, reg); |
2080 | } |
2081 | for (i = AGE_RESET_TIMEOUT; i > 0; i--) { |
2082 | if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & |
2083 | (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) |
2084 | break; |
2085 | DELAY(10); |
2086 | } |
2087 | if (i == 0) |
2088 | printf("%s: stopping TxMAC timeout!\n" , device_xname(sc->sc_dev)); |
2089 | } |
2090 | |
2091 | static void |
2092 | age_stop_rxmac(struct age_softc *sc) |
2093 | { |
2094 | uint32_t reg; |
2095 | int i; |
2096 | |
2097 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
2098 | if ((reg & MAC_CFG_RX_ENB) != 0) { |
2099 | reg &= ~MAC_CFG_RX_ENB; |
2100 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
2101 | } |
2102 | /* Stop Rx DMA engine. */ |
2103 | reg = CSR_READ_4(sc, AGE_DMA_CFG); |
2104 | if ((reg & DMA_CFG_WR_ENB) != 0) { |
2105 | reg &= ~DMA_CFG_WR_ENB; |
2106 | CSR_WRITE_4(sc, AGE_DMA_CFG, reg); |
2107 | } |
2108 | for (i = AGE_RESET_TIMEOUT; i > 0; i--) { |
2109 | if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & |
2110 | (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) |
2111 | break; |
2112 | DELAY(10); |
2113 | } |
2114 | if (i == 0) |
2115 | printf("%s: stopping RxMAC timeout!\n" , device_xname(sc->sc_dev)); |
2116 | } |
2117 | |
2118 | static void |
2119 | age_init_tx_ring(struct age_softc *sc) |
2120 | { |
2121 | struct age_ring_data *rd; |
2122 | struct age_txdesc *txd; |
2123 | int i; |
2124 | |
2125 | sc->age_cdata.age_tx_prod = 0; |
2126 | sc->age_cdata.age_tx_cons = 0; |
2127 | sc->age_cdata.age_tx_cnt = 0; |
2128 | |
2129 | rd = &sc->age_rdata; |
2130 | memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ); |
2131 | for (i = 0; i < AGE_TX_RING_CNT; i++) { |
2132 | txd = &sc->age_cdata.age_txdesc[i]; |
2133 | txd->tx_desc = &rd->age_tx_ring[i]; |
2134 | txd->tx_m = NULL; |
2135 | } |
2136 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, |
2137 | sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2138 | } |
2139 | |
2140 | static int |
2141 | age_init_rx_ring(struct age_softc *sc) |
2142 | { |
2143 | struct age_ring_data *rd; |
2144 | struct age_rxdesc *rxd; |
2145 | int i; |
2146 | |
2147 | sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; |
2148 | rd = &sc->age_rdata; |
2149 | memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ); |
2150 | for (i = 0; i < AGE_RX_RING_CNT; i++) { |
2151 | rxd = &sc->age_cdata.age_rxdesc[i]; |
2152 | rxd->rx_m = NULL; |
2153 | rxd->rx_desc = &rd->age_rx_ring[i]; |
2154 | if (age_newbuf(sc, rxd, 1) != 0) |
2155 | return ENOBUFS; |
2156 | } |
2157 | |
2158 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0, |
2159 | sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2160 | |
2161 | return 0; |
2162 | } |
2163 | |
2164 | static void |
2165 | age_init_rr_ring(struct age_softc *sc) |
2166 | { |
2167 | struct age_ring_data *rd; |
2168 | |
2169 | sc->age_cdata.age_rr_cons = 0; |
2170 | AGE_RXCHAIN_RESET(sc); |
2171 | |
2172 | rd = &sc->age_rdata; |
2173 | memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ); |
2174 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, |
2175 | sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2176 | } |
2177 | |
2178 | static void |
2179 | age_init_cmb_block(struct age_softc *sc) |
2180 | { |
2181 | struct age_ring_data *rd; |
2182 | |
2183 | rd = &sc->age_rdata; |
2184 | memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ); |
2185 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, |
2186 | sc->age_cdata.age_cmb_block_map->dm_mapsize, |
2187 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
2188 | } |
2189 | |
2190 | static void |
2191 | age_init_smb_block(struct age_softc *sc) |
2192 | { |
2193 | struct age_ring_data *rd; |
2194 | |
2195 | rd = &sc->age_rdata; |
2196 | memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ); |
2197 | bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, |
2198 | sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2199 | } |
2200 | |
2201 | static int |
2202 | age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init) |
2203 | { |
2204 | struct rx_desc *desc; |
2205 | struct mbuf *m; |
2206 | bus_dmamap_t map; |
2207 | int error; |
2208 | |
2209 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
2210 | if (m == NULL) |
2211 | return ENOBUFS; |
2212 | MCLGET(m, M_DONTWAIT); |
2213 | if (!(m->m_flags & M_EXT)) { |
2214 | m_freem(m); |
2215 | return ENOBUFS; |
2216 | } |
2217 | |
2218 | m->m_len = m->m_pkthdr.len = MCLBYTES; |
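	/*
	 * Trim ETHER_ALIGN (2) bytes from the front of the cluster so that
	 * the IP header following the 14-byte Ethernet header ends up
	 * 32-bit aligned.
	 */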
2219 | m_adj(m, ETHER_ALIGN); |
2220 | |
2221 | error = bus_dmamap_load_mbuf(sc->sc_dmat, |
2222 | sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT); |
2223 | |
2224 | if (error != 0) { |
2225 | m_freem(m); |
2226 | |
2227 | if (init) |
2228 | printf("%s: can't load RX mbuf\n" , device_xname(sc->sc_dev)); |
2229 | return error; |
2230 | } |
2231 | |
2232 | if (rxd->rx_m != NULL) { |
2233 | bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, |
2234 | rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2235 | bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); |
2236 | } |
2237 | map = rxd->rx_dmamap; |
2238 | rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap; |
2239 | sc->age_cdata.age_rx_sparemap = map; |
2240 | rxd->rx_m = m; |
2241 | |
2242 | desc = rxd->rx_desc; |
2243 | desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr); |
2244 | desc->len = |
2245 | htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) << |
2246 | AGE_RD_LEN_SHIFT); |
2247 | |
2248 | return 0; |
2249 | } |
2250 | |
2251 | static void |
2252 | age_rxvlan(struct age_softc *sc) |
2253 | { |
2254 | uint32_t reg; |
2255 | |
2256 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
2257 | reg &= ~MAC_CFG_VLAN_TAG_STRIP; |
2258 | if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) |
2259 | reg |= MAC_CFG_VLAN_TAG_STRIP; |
2260 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
2261 | } |
2262 | |
2263 | static void |
2264 | age_rxfilter(struct age_softc *sc) |
2265 | { |
2266 | struct ethercom *ec = &sc->sc_ec; |
2267 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
2268 | struct ether_multi *enm; |
2269 | struct ether_multistep step; |
2270 | uint32_t crc; |
2271 | uint32_t mchash[2]; |
2272 | uint32_t rxcfg; |
2273 | |
2274 | rxcfg = CSR_READ_4(sc, AGE_MAC_CFG); |
2275 | rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); |
2276 | ifp->if_flags &= ~IFF_ALLMULTI; |
2277 | |
2278 | /* |
2279 | * Always accept broadcast frames. |
2280 | */ |
2281 | rxcfg |= MAC_CFG_BCAST; |
2282 | |
2283 | if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { |
2284 | ifp->if_flags |= IFF_ALLMULTI; |
2285 | if (ifp->if_flags & IFF_PROMISC) |
2286 | rxcfg |= MAC_CFG_PROMISC; |
2287 | else |
2288 | rxcfg |= MAC_CFG_ALLMULTI; |
2289 | mchash[0] = mchash[1] = 0xFFFFFFFF; |
2290 | } else { |
2291 | /* Program new filter. */ |
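		/*
		 * 64-bit multicast hash filter: the top bit of the CRC
		 * selects one of the two MAR words and bits 30..26 select
		 * the bit within that word; e.g. a CRC of 0x84000000 sets
		 * bit 1 of mchash[1].
		 */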
2292 | memset(mchash, 0, sizeof(mchash)); |
2293 | |
2294 | ETHER_FIRST_MULTI(step, ec, enm); |
2295 | while (enm != NULL) { |
2296 | crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); |
2297 | mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); |
2298 | ETHER_NEXT_MULTI(step, enm); |
2299 | } |
2300 | } |
2301 | |
2302 | CSR_WRITE_4(sc, AGE_MAR0, mchash[0]); |
2303 | CSR_WRITE_4(sc, AGE_MAR1, mchash[1]); |
2304 | CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg); |
2305 | } |
2306 | |