1 | /* $NetBSD: if_txp.c,v 1.45 2016/07/14 10:19:06 msaitoh Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2001 |
5 | * Jason L. Wright <jason@thought.net>, Theo de Raadt, and |
6 | * Aaron Campbell <aaron@monkey.org>. All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions |
10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
19 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS |
21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
27 | * THE POSSIBILITY OF SUCH DAMAGE. |
28 | */ |
29 | |
30 | /* |
31 | * Driver for 3c990 (Typhoon) Ethernet ASIC |
32 | */ |
33 | |
34 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: if_txp.c,v 1.45 2016/07/14 10:19:06 msaitoh Exp $");
36 | |
37 | #include "opt_inet.h" |
38 | |
39 | #include <sys/param.h> |
40 | #include <sys/systm.h> |
41 | #include <sys/sockio.h> |
42 | #include <sys/mbuf.h> |
43 | #include <sys/malloc.h> |
44 | #include <sys/kernel.h> |
45 | #include <sys/socket.h> |
46 | #include <sys/device.h> |
47 | #include <sys/callout.h> |
48 | |
49 | #include <net/if.h> |
50 | #include <net/if_dl.h> |
51 | #include <net/if_types.h> |
52 | #include <net/if_ether.h> |
53 | #include <net/if_arp.h> |
54 | |
55 | #ifdef INET |
56 | #include <netinet/in.h> |
57 | #include <netinet/in_systm.h> |
58 | #include <netinet/in_var.h> |
59 | #include <netinet/ip.h> |
60 | #include <netinet/if_inarp.h> |
61 | #endif |
62 | |
63 | #include <net/if_media.h> |
64 | |
65 | #include <net/bpf.h> |
66 | |
67 | #include <sys/bus.h> |
68 | |
69 | #include <dev/mii/mii.h> |
70 | #include <dev/mii/miivar.h> |
71 | #include <dev/pci/pcireg.h> |
72 | #include <dev/pci/pcivar.h> |
73 | #include <dev/pci/pcidevs.h> |
74 | |
75 | #include <dev/pci/if_txpreg.h> |
76 | |
77 | #include <dev/microcode/typhoon/3c990img.h> |
78 | |
79 | /* |
 * These currently break the 3c990 firmware; hopefully this will be
 * resolved at some point.
82 | */ |
83 | #undef TRY_TX_UDP_CSUM |
84 | #undef TRY_TX_TCP_CSUM |
85 | |
86 | int txp_probe(device_t, cfdata_t, void *); |
87 | void txp_attach(device_t, device_t, void *); |
88 | int txp_intr(void *); |
89 | void txp_tick(void *); |
90 | bool txp_shutdown(device_t, int); |
91 | int txp_ioctl(struct ifnet *, u_long, void *); |
92 | void txp_start(struct ifnet *); |
93 | void txp_stop(struct txp_softc *); |
94 | void txp_init(struct txp_softc *); |
95 | void txp_watchdog(struct ifnet *); |
96 | |
97 | int txp_chip_init(struct txp_softc *); |
98 | int txp_reset_adapter(struct txp_softc *); |
99 | int txp_download_fw(struct txp_softc *); |
100 | int txp_download_fw_wait(struct txp_softc *); |
101 | int txp_download_fw_section(struct txp_softc *, |
102 | const struct txp_fw_section_header *, int); |
103 | int txp_alloc_rings(struct txp_softc *); |
104 | void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *); |
105 | int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int); |
106 | void txp_set_filter(struct txp_softc *); |
107 | |
108 | int txp_cmd_desc_numfree(struct txp_softc *); |
109 | int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t, |
110 | u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int); |
111 | int txp_command2(struct txp_softc *, u_int16_t, u_int16_t, |
112 | u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t, |
113 | struct txp_rsp_desc **, int); |
114 | int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t, |
115 | struct txp_rsp_desc **); |
116 | void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *, |
117 | struct txp_rsp_desc *); |
118 | void txp_capabilities(struct txp_softc *); |
119 | |
120 | void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *); |
121 | int txp_ifmedia_upd(struct ifnet *); |
122 | void txp_show_descriptor(void *); |
123 | void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *, |
124 | struct txp_dma_alloc *); |
125 | void txp_rxbuf_reclaim(struct txp_softc *); |
126 | void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, |
127 | struct txp_dma_alloc *); |
128 | |
129 | CFATTACH_DECL_NEW(txp, sizeof(struct txp_softc), txp_probe, txp_attach, |
130 | NULL, NULL); |
131 | |
static const struct txp_pci_match {
133 | int vid, did, flags; |
134 | } txp_devices[] = { |
135 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990, 0 }, |
136 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95, 0 }, |
137 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97, 0 }, |
138 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95, TXP_SERVERVERSION }, |
139 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97, TXP_SERVERVERSION }, |
140 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B, TXP_USESUBSYSTEM }, |
141 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR, TXP_SERVERVERSION }, |
142 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX, TXP_USESUBSYSTEM }, |
143 | }; |
144 | |
145 | static const struct txp_pci_match *txp_pcilookup(pcireg_t); |
146 | |
147 | static const struct { |
148 | u_int16_t mask, value; |
149 | int flags; |
150 | } txp_subsysinfo[] = { |
151 | {0xf000, 0x2000, TXP_SERVERVERSION}, |
152 | {0x0100, 0x0100, TXP_FIBER}, |
153 | #if 0 /* information from 3com header, unused */ |
154 | {0x0010, 0x0010, /* secured firmware */}, |
155 | {0x0003, 0x0000, /* variable DES */}, |
156 | {0x0003, 0x0001, /* single DES - "95" */}, |
157 | {0x0003, 0x0002, /* triple DES - "97" */}, |
158 | #endif |
159 | }; |
160 | |
161 | static const struct txp_pci_match * |
162 | txp_pcilookup(pcireg_t id) |
163 | { |
164 | int i; |
165 | |
166 | for (i = 0; i < __arraycount(txp_devices); i++) |
167 | if (PCI_VENDOR(id) == txp_devices[i].vid && |
168 | PCI_PRODUCT(id) == txp_devices[i].did) |
169 | return &txp_devices[i]; |
170 | return (0); |
171 | } |
172 | |
173 | int |
174 | txp_probe(device_t parent, cfdata_t match, void *aux) |
175 | { |
176 | struct pci_attach_args *pa = aux; |
177 | |
178 | if (txp_pcilookup(pa->pa_id)) |
179 | return (1); |
180 | return (0); |
181 | } |
182 | |
183 | void |
184 | txp_attach(device_t parent, device_t self, void *aux) |
185 | { |
186 | struct txp_softc *sc = device_private(self); |
187 | struct pci_attach_args *pa = aux; |
188 | pci_chipset_tag_t pc = pa->pa_pc; |
189 | pci_intr_handle_t ih; |
190 | const char *intrstr = NULL; |
191 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
192 | u_int32_t command; |
193 | u_int16_t p1; |
194 | u_int32_t p2; |
195 | u_char enaddr[6]; |
196 | const struct txp_pci_match *match; |
197 | u_int16_t subsys; |
198 | int i, flags; |
199 | char devinfo[256]; |
200 | char intrbuf[PCI_INTRSTR_LEN]; |
201 | |
202 | sc->sc_dev = self; |
203 | sc->sc_cold = 1; |
204 | |
205 | match = txp_pcilookup(pa->pa_id); |
206 | flags = match->flags; |
207 | if (match->flags & TXP_USESUBSYSTEM) { |
208 | subsys = PCI_PRODUCT(pci_conf_read(pc, pa->pa_tag, |
209 | PCI_SUBSYS_ID_REG)); |
210 | for (i = 0; |
211 | i < sizeof(txp_subsysinfo)/sizeof(txp_subsysinfo[0]); |
212 | i++) |
213 | if ((subsys & txp_subsysinfo[i].mask) == |
214 | txp_subsysinfo[i].value) |
215 | flags |= txp_subsysinfo[i].flags; |
216 | } |
217 | sc->sc_flags = flags; |
218 | |
	aprint_naive("\n");
	pci_devinfo(pa->pa_id, 0, 0, devinfo, sizeof(devinfo));
#define TXP_EXTRAINFO ((flags & (TXP_USESUBSYSTEM|TXP_SERVERVERSION)) == \
  (TXP_USESUBSYSTEM|TXP_SERVERVERSION) ? " (SVR)" : "")
	aprint_normal(": %s%s\n%s", devinfo, TXP_EXTRAINFO,
	    device_xname(sc->sc_dev));
225 | |
226 | command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
227 | |
228 | if (!(command & PCI_COMMAND_MASTER_ENABLE)) { |
		aprint_error(": failed to enable bus mastering\n");
230 | return; |
231 | } |
232 | |
233 | if (!(command & PCI_COMMAND_MEM_ENABLE)) { |
		aprint_error(": failed to enable memory mapping\n");
235 | return; |
236 | } |
237 | if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, |
238 | &sc->sc_bt, &sc->sc_bh, NULL, NULL)) { |
		aprint_error(": can't map mem space %d\n", 0);
240 | return; |
241 | } |
242 | |
243 | sc->sc_dmat = pa->pa_dmat; |
244 | |
245 | /* |
246 | * Allocate our interrupt. |
247 | */ |
248 | if (pci_intr_map(pa, &ih)) { |
		aprint_error(": couldn't map interrupt\n");
250 | return; |
251 | } |
252 | |
253 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); |
254 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc); |
255 | if (sc->sc_ih == NULL) { |
		aprint_error(": couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
260 | return; |
261 | } |
	aprint_normal(": interrupting at %s\n", intrstr);
263 | |
264 | if (txp_chip_init(sc)) |
265 | goto cleanupintr; |
266 | |
267 | if (txp_download_fw(sc)) |
268 | goto cleanupintr; |
269 | |
270 | if (txp_alloc_rings(sc)) |
271 | goto cleanupintr; |
272 | |
273 | if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, |
274 | NULL, NULL, NULL, 1)) |
275 | goto cleanupintr; |
276 | |
277 | if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0, |
278 | &p1, &p2, NULL, 1)) |
279 | goto cleanupintr; |
280 | |
281 | txp_set_filter(sc); |
282 | |
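	/*
	 * The station address was returned in two little-endian command
	 * response parameters; unpack it most-significant octet first.
	 */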
283 | p1 = htole16(p1); |
284 | enaddr[0] = ((u_int8_t *)&p1)[1]; |
285 | enaddr[1] = ((u_int8_t *)&p1)[0]; |
286 | p2 = htole32(p2); |
287 | enaddr[2] = ((u_int8_t *)&p2)[3]; |
288 | enaddr[3] = ((u_int8_t *)&p2)[2]; |
289 | enaddr[4] = ((u_int8_t *)&p2)[1]; |
290 | enaddr[5] = ((u_int8_t *)&p2)[0]; |
291 | |
	aprint_normal_dev(self, "Ethernet address %s\n",
293 | ether_sprintf(enaddr)); |
294 | sc->sc_cold = 0; |
295 | |
296 | ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts); |
297 | if (flags & TXP_FIBER) { |
298 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX, |
299 | 0, NULL); |
300 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_HDX, |
301 | 0, NULL); |
302 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_FDX, |
303 | 0, NULL); |
304 | } else { |
305 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, |
306 | 0, NULL); |
307 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, |
308 | 0, NULL); |
309 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, |
310 | 0, NULL); |
311 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, |
312 | 0, NULL); |
313 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, |
314 | 0, NULL); |
315 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, |
316 | 0, NULL); |
317 | } |
318 | ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); |
319 | |
320 | sc->sc_xcvr = TXP_XCVR_AUTO; |
321 | txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0, |
322 | NULL, NULL, NULL, 0); |
323 | ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); |
324 | |
325 | ifp->if_softc = sc; |
326 | ifp->if_mtu = ETHERMTU; |
327 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
328 | ifp->if_ioctl = txp_ioctl; |
329 | ifp->if_start = txp_start; |
330 | ifp->if_watchdog = txp_watchdog; |
331 | ifp->if_baudrate = 10000000; |
332 | IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES); |
333 | IFQ_SET_READY(&ifp->if_snd); |
334 | ifp->if_capabilities = 0; |
335 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
336 | |
337 | txp_capabilities(sc); |
338 | |
339 | callout_init(&sc->sc_tick, 0); |
340 | callout_setfunc(&sc->sc_tick, txp_tick, sc); |
341 | |
342 | /* |
343 | * Attach us everywhere |
344 | */ |
345 | if_attach(ifp); |
346 | ether_ifattach(ifp, enaddr); |
347 | |
348 | if (pmf_device_register1(self, NULL, NULL, txp_shutdown)) |
349 | pmf_class_network_register(self, ifp); |
350 | else |
		aprint_error_dev(self, "couldn't establish power handler\n");
352 | |
353 | return; |
354 | |
355 | cleanupintr: |
	pci_intr_disestablish(pc, sc->sc_ih);
}
361 | |
362 | int |
363 | txp_chip_init(struct txp_softc *sc) |
364 | { |
365 | /* disable interrupts */ |
366 | WRITE_REG(sc, TXP_IER, 0); |
367 | WRITE_REG(sc, TXP_IMR, |
368 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | |
369 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
370 | TXP_INT_LATCH); |
371 | |
372 | /* ack all interrupts */ |
373 | WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | |
374 | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | |
375 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | |
376 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
377 | TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); |
378 | |
379 | if (txp_reset_adapter(sc)) |
380 | return (-1); |
381 | |
382 | /* disable interrupts */ |
383 | WRITE_REG(sc, TXP_IER, 0); |
384 | WRITE_REG(sc, TXP_IMR, |
385 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | |
386 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
387 | TXP_INT_LATCH); |
388 | |
389 | /* ack all interrupts */ |
390 | WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | |
391 | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | |
392 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | |
393 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
394 | TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); |
395 | |
396 | return (0); |
397 | } |
398 | |
399 | int |
400 | txp_reset_adapter(struct txp_softc *sc) |
401 | { |
402 | u_int32_t r; |
403 | int i; |
404 | |
405 | WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL); |
406 | DELAY(1000); |
407 | WRITE_REG(sc, TXP_SRR, 0); |
408 | |
409 | /* Should wait max 6 seconds */ |
410 | for (i = 0; i < 6000; i++) { |
411 | r = READ_REG(sc, TXP_A2H_0); |
412 | if (r == STAT_WAITING_FOR_HOST_REQUEST) |
413 | break; |
414 | DELAY(1000); |
415 | } |
416 | |
417 | if (r != STAT_WAITING_FOR_HOST_REQUEST) { |
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
419 | return (-1); |
420 | } |
421 | |
422 | return (0); |
423 | } |
424 | |
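/*
 * Hand the runtime image to the boot firmware: wait for the
 * host-request handshake, validate the image header, download each
 * section in turn, then signal download-complete.
 */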
425 | int |
426 | txp_download_fw(struct txp_softc *sc) |
427 | { |
	const struct txp_fw_file_header *fileheader;
429 | const struct txp_fw_section_header *secthead; |
430 | int sect; |
431 | u_int32_t r, i, ier, imr; |
432 | |
433 | ier = READ_REG(sc, TXP_IER); |
434 | WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0); |
435 | |
436 | imr = READ_REG(sc, TXP_IMR); |
437 | WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0); |
438 | |
439 | for (i = 0; i < 10000; i++) { |
440 | r = READ_REG(sc, TXP_A2H_0); |
441 | if (r == STAT_WAITING_FOR_HOST_REQUEST) |
442 | break; |
443 | DELAY(50); |
444 | } |
445 | if (r != STAT_WAITING_FOR_HOST_REQUEST) { |
		printf(": not waiting for host request\n");
447 | return (-1); |
448 | } |
449 | |
450 | /* Ack the status */ |
451 | WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); |
452 | |
453 | fileheader = (const struct txp_fw_file_header *)tc990image; |
	if (memcmp("TYPHOON", fileheader->magicid,
	    sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
457 | return (-1); |
458 | } |
459 | |
460 | /* Tell boot firmware to get ready for image */ |
461 | WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr)); |
462 | WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE); |
463 | |
464 | if (txp_download_fw_wait(sc)) { |
		printf("%s: fw wait failed, initial\n",
466 | device_xname(sc->sc_dev)); |
467 | return (-1); |
468 | } |
469 | |
470 | secthead = (const struct txp_fw_section_header *) |
471 | (((const u_int8_t *)tc990image) + |
472 | sizeof(struct txp_fw_file_header)); |
473 | |
474 | for (sect = 0; sect < le32toh(fileheader->nsections); sect++) { |
475 | if (txp_download_fw_section(sc, secthead, sect)) |
476 | return (-1); |
477 | secthead = (const struct txp_fw_section_header *) |
478 | (((const u_int8_t *)secthead) + le32toh(secthead->nbytes) + |
479 | sizeof(*secthead)); |
480 | } |
481 | |
482 | WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE); |
483 | |
484 | for (i = 0; i < 10000; i++) { |
485 | r = READ_REG(sc, TXP_A2H_0); |
486 | if (r == STAT_WAITING_FOR_BOOT) |
487 | break; |
488 | DELAY(50); |
489 | } |
490 | if (r != STAT_WAITING_FOR_BOOT) { |
		printf(": not waiting for boot\n");
492 | return (-1); |
493 | } |
494 | |
495 | WRITE_REG(sc, TXP_IER, ier); |
496 | WRITE_REG(sc, TXP_IMR, imr); |
497 | |
498 | return (0); |
499 | } |
500 | |
501 | int |
502 | txp_download_fw_wait(struct txp_softc *sc) |
503 | { |
504 | u_int32_t i, r; |
505 | |
506 | for (i = 0; i < 10000; i++) { |
507 | r = READ_REG(sc, TXP_ISR); |
508 | if (r & TXP_INT_A2H_0) |
509 | break; |
510 | DELAY(50); |
511 | } |
512 | |
513 | if (!(r & TXP_INT_A2H_0)) { |
		printf(": fw wait failed comm0\n");
515 | return (-1); |
516 | } |
517 | |
518 | WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); |
519 | |
520 | r = READ_REG(sc, TXP_A2H_0); |
521 | if (r != STAT_WAITING_FOR_SEGMENT) { |
		printf(": fw not waiting for segment\n");
523 | return (-1); |
524 | } |
525 | return (0); |
526 | } |
527 | |
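/*
 * Download one firmware section: copy it into a freshly allocated DMA
 * buffer, verify its checksum, and pass the buffer's bus address to the
 * boot firmware through the H2A mailbox registers.
 */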
528 | int |
529 | txp_download_fw_section(struct txp_softc *sc, |
530 | const struct txp_fw_section_header *sect, int sectnum) |
531 | { |
532 | struct txp_dma_alloc dma; |
533 | int rseg, err = 0; |
534 | struct mbuf m; |
535 | #ifdef INET |
536 | u_int16_t csum; |
537 | #endif |
538 | |
539 | /* Skip zero length sections */ |
540 | if (sect->nbytes == 0) |
541 | return (0); |
542 | |
543 | /* Make sure we aren't past the end of the image */ |
544 | rseg = ((const u_int8_t *)sect) - ((const u_int8_t *)tc990image); |
545 | if (rseg >= sizeof(tc990image)) { |
		printf(": fw invalid section address, section %d\n", sectnum);
547 | return (-1); |
548 | } |
549 | |
550 | /* Make sure this section doesn't go past the end */ |
551 | rseg += le32toh(sect->nbytes); |
552 | if (rseg >= sizeof(tc990image)) { |
		printf(": fw truncated section %d\n", sectnum);
554 | return (-1); |
555 | } |
556 | |
557 | /* map a buffer, copy segment to it, get physaddr */ |
558 | if (txp_dma_malloc(sc, le32toh(sect->nbytes), &dma, 0)) { |
		printf(": fw dma malloc failed, section %d\n", sectnum);
560 | return (-1); |
561 | } |
562 | |
563 | memcpy(dma.dma_vaddr, ((const u_int8_t *)sect) + sizeof(*sect), |
564 | le32toh(sect->nbytes)); |
565 | |
566 | /* |
567 | * dummy up mbuf and verify section checksum |
568 | */ |
569 | m.m_type = MT_DATA; |
570 | m.m_next = m.m_nextpkt = NULL; |
571 | m.m_len = le32toh(sect->nbytes); |
572 | m.m_data = dma.dma_vaddr; |
573 | m.m_flags = 0; |
574 | #ifdef INET |
575 | csum = in_cksum(&m, le32toh(sect->nbytes)); |
576 | if (csum != sect->cksum) { |
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
578 | sectnum, sect->cksum, csum); |
579 | txp_dma_free(sc, &dma); |
580 | return -1; |
581 | } |
582 | #endif |
583 | |
584 | bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0, |
585 | dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
586 | |
587 | WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes)); |
588 | WRITE_REG(sc, TXP_H2A_2, le32toh(sect->cksum)); |
589 | WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr)); |
590 | WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32); |
591 | WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff); |
592 | WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE); |
593 | |
594 | if (txp_download_fw_wait(sc)) { |
		printf("%s: fw wait failed, section %d\n",
596 | device_xname(sc->sc_dev), sectnum); |
597 | err = -1; |
598 | } |
599 | |
600 | bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0, |
601 | dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
602 | |
603 | txp_dma_free(sc, &dma); |
604 | return (err); |
605 | } |
606 | |
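/*
 * Interrupt handler: mask everything, then ack and service events until
 * the ISR reads back clear; on exit only A2H_3 is left masked.
 */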
607 | int |
608 | txp_intr(void *vsc) |
609 | { |
610 | struct txp_softc *sc = vsc; |
611 | struct txp_hostvar *hv = sc->sc_hostvar; |
612 | u_int32_t isr; |
613 | int claimed = 0; |
614 | |
615 | /* mask all interrupts */ |
616 | WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF | |
617 | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | |
618 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | |
619 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
620 | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); |
621 | |
622 | bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, |
623 | sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD); |
624 | |
625 | isr = READ_REG(sc, TXP_ISR); |
626 | while (isr) { |
627 | claimed = 1; |
628 | WRITE_REG(sc, TXP_ISR, isr); |
629 | |
630 | if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff)) |
631 | txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma); |
632 | if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff)) |
633 | txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma); |
634 | |
635 | if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx) |
636 | txp_rxbuf_reclaim(sc); |
637 | |
638 | if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons != |
639 | TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off))))) |
640 | txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma); |
641 | |
642 | if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons != |
643 | TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off))))) |
644 | txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma); |
645 | |
646 | isr = READ_REG(sc, TXP_ISR); |
647 | } |
648 | |
649 | bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, |
650 | sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD); |
651 | |
652 | /* unmask all interrupts */ |
653 | WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); |
654 | |
655 | txp_start(&sc->sc_arpcom.ec_if); |
656 | |
657 | return (claimed); |
658 | } |
659 | |
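/*
 * Drain completed receive descriptors between the ring's read and write
 * offsets, handing good packets up the stack and advancing the read
 * offset as we go.
 */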
660 | void |
661 | txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, |
662 | struct txp_dma_alloc *dma) |
663 | { |
664 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
665 | struct txp_rx_desc *rxd; |
666 | struct mbuf *m; |
667 | struct txp_swdesc *sd; |
668 | u_int32_t roff, woff; |
669 | int sumflags = 0; |
670 | int idx; |
671 | |
672 | roff = le32toh(*r->r_roff); |
673 | woff = le32toh(*r->r_woff); |
674 | idx = roff / sizeof(struct txp_rx_desc); |
675 | rxd = r->r_desc + idx; |
676 | |
677 | while (roff != woff) { |
678 | |
679 | bus_dmamap_sync(sc->sc_dmat, dma->dma_map, |
680 | idx * sizeof(struct txp_rx_desc), |
681 | sizeof(struct txp_rx_desc), BUS_DMASYNC_POSTREAD); |
682 | |
683 | if (rxd->rx_flags & RX_FLAGS_ERROR) { |
			printf("%s: error 0x%x\n", device_xname(sc->sc_dev),
685 | le32toh(rxd->rx_stat)); |
686 | ifp->if_ierrors++; |
687 | goto next; |
688 | } |
689 | |
690 | /* retrieve stashed pointer */ |
691 | memcpy(&sd, __UNVOLATILE(&rxd->rx_vaddrlo), sizeof(sd)); |
692 | |
693 | bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, |
694 | sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
695 | bus_dmamap_unload(sc->sc_dmat, sd->sd_map); |
696 | bus_dmamap_destroy(sc->sc_dmat, sd->sd_map); |
697 | m = sd->sd_mbuf; |
698 | free(sd, M_DEVBUF); |
699 | m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len); |
700 | |
701 | #ifdef __STRICT_ALIGNMENT |
702 | { |
703 | /* |
704 | * XXX Nice chip, except it won't accept "off by 2" |
705 | * buffers, so we're force to copy. Supposedly |
706 | * this will be fixed in a newer firmware rev |
707 | * and this will be temporary. |
708 | */ |
709 | struct mbuf *mnew; |
710 | |
711 | MGETHDR(mnew, M_DONTWAIT, MT_DATA); |
712 | if (mnew == NULL) { |
713 | m_freem(m); |
714 | goto next; |
715 | } |
716 | if (m->m_len > (MHLEN - 2)) { |
717 | MCLGET(mnew, M_DONTWAIT); |
718 | if (!(mnew->m_flags & M_EXT)) { |
719 | m_freem(mnew); |
720 | m_freem(m); |
721 | goto next; |
722 | } |
723 | } |
724 | m_set_rcvif(mnew, ifp); |
725 | mnew->m_pkthdr.len = mnew->m_len = m->m_len; |
726 | mnew->m_data += 2; |
727 | memcpy(mnew->m_data, m->m_data, m->m_len); |
728 | m_freem(m); |
729 | m = mnew; |
730 | } |
731 | #endif |
732 | |
733 | /* |
734 | * Handle BPF listeners. Let the BPF user see the packet. |
735 | */ |
736 | bpf_mtap(ifp, m); |
737 | |
738 | if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD)) |
739 | sumflags |= (M_CSUM_IPv4|M_CSUM_IPv4_BAD); |
740 | else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD)) |
741 | sumflags |= M_CSUM_IPv4; |
742 | |
743 | if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD)) |
744 | sumflags |= (M_CSUM_TCPv4|M_CSUM_TCP_UDP_BAD); |
745 | else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD)) |
746 | sumflags |= M_CSUM_TCPv4; |
747 | |
748 | if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD)) |
749 | sumflags |= (M_CSUM_UDPv4|M_CSUM_TCP_UDP_BAD); |
750 | else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD)) |
751 | sumflags |= M_CSUM_UDPv4; |
752 | |
753 | m->m_pkthdr.csum_flags = sumflags; |
754 | |
755 | if (rxd->rx_stat & htole32(RX_STAT_VLAN)) { |
756 | VLAN_INPUT_TAG(ifp, m, htons(rxd->rx_vlan >> 16), |
757 | continue); |
758 | } |
759 | |
760 | if_percpuq_enqueue(ifp->if_percpuq, m); |
761 | |
762 | next: |
763 | bus_dmamap_sync(sc->sc_dmat, dma->dma_map, |
764 | idx * sizeof(struct txp_rx_desc), |
765 | sizeof(struct txp_rx_desc), BUS_DMASYNC_PREREAD); |
766 | |
767 | roff += sizeof(struct txp_rx_desc); |
768 | if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) { |
769 | idx = 0; |
770 | roff = 0; |
771 | rxd = r->r_desc; |
772 | } else { |
773 | idx++; |
774 | rxd++; |
775 | } |
776 | woff = le32toh(*r->r_woff); |
777 | } |
778 | |
779 | *r->r_roff = htole32(woff); |
780 | } |
781 | |
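/*
 * Refill free receive-buffer slots with fresh mbuf clusters and DMA
 * maps, publishing the new write index to the adapter via the host
 * variables page.
 */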
782 | void |
783 | txp_rxbuf_reclaim(struct txp_softc *sc) |
784 | { |
785 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
786 | struct txp_hostvar *hv = sc->sc_hostvar; |
787 | struct txp_rxbuf_desc *rbd; |
788 | struct txp_swdesc *sd; |
789 | u_int32_t i, end; |
790 | |
791 | end = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx)); |
792 | i = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_write_idx)); |
793 | |
794 | if (++i == RXBUF_ENTRIES) |
795 | i = 0; |
796 | |
797 | rbd = sc->sc_rxbufs + i; |
798 | |
799 | while (i != end) { |
800 | sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc), |
801 | M_DEVBUF, M_NOWAIT); |
802 | if (sd == NULL) |
803 | break; |
804 | |
805 | MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); |
806 | if (sd->sd_mbuf == NULL) |
807 | goto err_sd; |
808 | |
809 | MCLGET(sd->sd_mbuf, M_DONTWAIT); |
810 | if ((sd->sd_mbuf->m_flags & M_EXT) == 0) |
811 | goto err_mbuf; |
812 | m_set_rcvif(sd->sd_mbuf, ifp); |
813 | sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; |
814 | if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1, |
815 | TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) |
816 | goto err_mbuf; |
817 | if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf, |
818 | BUS_DMA_NOWAIT)) { |
819 | bus_dmamap_destroy(sc->sc_dmat, sd->sd_map); |
820 | goto err_mbuf; |
821 | } |
822 | |
823 | bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map, |
824 | i * sizeof(struct txp_rxbuf_desc), |
825 | sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE); |
826 | |
827 | /* stash away pointer */ |
828 | memcpy(__UNVOLATILE(&rbd->rb_vaddrlo), &sd, sizeof(sd)); |
829 | |
830 | rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) |
831 | & 0xffffffff; |
832 | rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) |
833 | >> 32; |
834 | |
835 | bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, |
836 | sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
837 | |
838 | bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map, |
839 | i * sizeof(struct txp_rxbuf_desc), |
840 | sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE); |
841 | |
842 | hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i)); |
843 | |
844 | if (++i == RXBUF_ENTRIES) { |
845 | i = 0; |
846 | rbd = sc->sc_rxbufs; |
847 | } else |
848 | rbd++; |
849 | } |
850 | return; |
851 | |
852 | err_mbuf: |
853 | m_freem(sd->sd_mbuf); |
854 | err_sd: |
855 | free(sd, M_DEVBUF); |
856 | } |
857 | |
858 | /* |
859 | * Reclaim mbufs and entries from a transmit ring. |
860 | */ |
861 | void |
862 | txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r, |
863 | struct txp_dma_alloc *dma) |
864 | { |
865 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
866 | u_int32_t idx = TXP_OFFSET2IDX(le32toh(*(r->r_off))); |
867 | u_int32_t cons = r->r_cons, cnt = r->r_cnt; |
868 | struct txp_tx_desc *txd = r->r_desc + cons; |
869 | struct txp_swdesc *sd = sc->sc_txd + cons; |
870 | struct mbuf *m; |
871 | |
872 | while (cons != idx) { |
873 | if (cnt == 0) |
874 | break; |
875 | |
876 | bus_dmamap_sync(sc->sc_dmat, dma->dma_map, |
877 | cons * sizeof(struct txp_tx_desc), |
878 | sizeof(struct txp_tx_desc), |
879 | BUS_DMASYNC_POSTWRITE); |
880 | |
881 | if ((txd->tx_flags & TX_FLAGS_TYPE_M) == |
882 | TX_FLAGS_TYPE_DATA) { |
883 | bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, |
884 | sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
885 | bus_dmamap_unload(sc->sc_dmat, sd->sd_map); |
886 | m = sd->sd_mbuf; |
887 | if (m != NULL) { |
888 | m_freem(m); |
889 | txd->tx_addrlo = 0; |
890 | txd->tx_addrhi = 0; |
891 | ifp->if_opackets++; |
892 | } |
893 | } |
894 | ifp->if_flags &= ~IFF_OACTIVE; |
895 | |
896 | if (++cons == TX_ENTRIES) { |
897 | txd = r->r_desc; |
898 | cons = 0; |
899 | sd = sc->sc_txd; |
900 | } else { |
901 | txd++; |
902 | sd++; |
903 | } |
904 | |
905 | cnt--; |
906 | } |
907 | |
908 | r->r_cons = cons; |
909 | r->r_cnt = cnt; |
910 | if (cnt == 0) |
911 | ifp->if_timer = 0; |
912 | } |
913 | |
914 | bool |
915 | txp_shutdown(device_t self, int howto) |
916 | { |
917 | struct txp_softc *sc; |
918 | |
919 | sc = device_private(self); |
920 | |
921 | /* mask all interrupts */ |
922 | WRITE_REG(sc, TXP_IMR, |
923 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | |
924 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
925 | TXP_INT_LATCH); |
926 | |
927 | txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); |
928 | txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); |
929 | txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0); |
930 | |
931 | return true; |
932 | } |
933 | |
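/*
 * Allocate the boot record, host variables page, and the tx, rx,
 * command, response, and receive-buffer rings, register them with the
 * firmware, and wait for it to report that it is running.
 */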
934 | int |
935 | txp_alloc_rings(struct txp_softc *sc) |
936 | { |
937 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
938 | struct txp_boot_record *boot; |
939 | struct txp_swdesc *sd; |
940 | u_int32_t r; |
941 | int i, j, nb; |
942 | |
943 | /* boot record */ |
944 | if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), |
945 | &sc->sc_boot_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate boot record\n");
947 | return (-1); |
948 | } |
949 | boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr; |
950 | memset(boot, 0, sizeof(*boot)); |
951 | sc->sc_boot = boot; |
952 | |
953 | /* host variables */ |
954 | if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma, |
955 | BUS_DMA_COHERENT)) { |
		printf(": can't allocate host ring\n");
957 | goto bail_boot; |
958 | } |
959 | memset(sc->sc_host_dma.dma_vaddr, 0, sizeof(struct txp_hostvar)); |
960 | boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff); |
961 | boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32); |
962 | sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr; |
963 | |
964 | /* high priority tx ring */ |
965 | if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES, |
966 | &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate high tx ring\n");
968 | goto bail_host; |
969 | } |
970 | memset(sc->sc_txhiring_dma.dma_vaddr, 0, |
971 | sizeof(struct txp_tx_desc) * TX_ENTRIES); |
972 | boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff); |
973 | boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32); |
974 | boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc)); |
975 | sc->sc_txhir.r_reg = TXP_H2A_1; |
976 | sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr; |
977 | sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0; |
978 | sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx; |
979 | for (i = 0; i < TX_ENTRIES; i++) { |
980 | if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, |
981 | TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0, |
982 | BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) { |
983 | for (j = 0; j < i; j++) { |
984 | bus_dmamap_destroy(sc->sc_dmat, |
985 | sc->sc_txd[j].sd_map); |
986 | sc->sc_txd[j].sd_map = NULL; |
987 | } |
988 | goto bail_txhiring; |
989 | } |
990 | } |
991 | |
992 | /* low priority tx ring */ |
993 | if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES, |
994 | &sc->sc_txloring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate low tx ring\n");
996 | goto bail_txhiring; |
997 | } |
998 | memset(sc->sc_txloring_dma.dma_vaddr, 0, |
999 | sizeof(struct txp_tx_desc) * TX_ENTRIES); |
1000 | boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff); |
1001 | boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32); |
1002 | boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc)); |
1003 | sc->sc_txlor.r_reg = TXP_H2A_3; |
1004 | sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr; |
1005 | sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0; |
1006 | sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx; |
1007 | |
1008 | /* high priority rx ring */ |
1009 | if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES, |
1010 | &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate high rx ring\n");
1012 | goto bail_txloring; |
1013 | } |
1014 | memset(sc->sc_rxhiring_dma.dma_vaddr, 0, |
1015 | sizeof(struct txp_rx_desc) * RX_ENTRIES); |
1016 | boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff); |
1017 | boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32); |
1018 | boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc)); |
1019 | sc->sc_rxhir.r_desc = |
1020 | (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr; |
1021 | sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx; |
1022 | sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx; |
1023 | bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map, |
1024 | 0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
1025 | |
1026 | /* low priority ring */ |
1027 | if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES, |
1028 | &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate low rx ring\n");
1030 | goto bail_rxhiring; |
1031 | } |
1032 | memset(sc->sc_rxloring_dma.dma_vaddr, 0, |
1033 | sizeof(struct txp_rx_desc) * RX_ENTRIES); |
1034 | boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff); |
1035 | boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32); |
1036 | boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc)); |
1037 | sc->sc_rxlor.r_desc = |
1038 | (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr; |
1039 | sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx; |
1040 | sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx; |
1041 | bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map, |
1042 | 0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
1043 | |
1044 | /* command ring */ |
1045 | if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES, |
1046 | &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate command ring\n");
1048 | goto bail_rxloring; |
1049 | } |
1050 | memset(sc->sc_cmdring_dma.dma_vaddr, 0, |
1051 | sizeof(struct txp_cmd_desc) * CMD_ENTRIES); |
1052 | boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff); |
1053 | boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32); |
1054 | boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc)); |
1055 | sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr; |
1056 | sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc); |
1057 | sc->sc_cmdring.lastwrite = 0; |
1058 | |
1059 | /* response ring */ |
1060 | if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES, |
1061 | &sc->sc_rspring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate response ring\n");
1063 | goto bail_cmdring; |
1064 | } |
1065 | memset(sc->sc_rspring_dma.dma_vaddr, 0, |
1066 | sizeof(struct txp_rsp_desc) * RSP_ENTRIES); |
1067 | boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff); |
1068 | boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32); |
1069 | boot->br_resp_siz = htole32(CMD_ENTRIES * sizeof(struct txp_rsp_desc)); |
1070 | sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr; |
1071 | sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc); |
1072 | sc->sc_rspring.lastwrite = 0; |
1073 | |
1074 | /* receive buffer ring */ |
1075 | if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES, |
1076 | &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) { |
		printf(": can't allocate rx buffer ring\n");
1078 | goto bail_rspring; |
1079 | } |
1080 | memset(sc->sc_rxbufring_dma.dma_vaddr, 0, |
1081 | sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES); |
1082 | boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff); |
1083 | boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32); |
1084 | boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc)); |
1085 | sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr; |
1086 | for (nb = 0; nb < RXBUF_ENTRIES; nb++) { |
1087 | sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc), |
1088 | M_DEVBUF, M_NOWAIT); |
1089 | /* stash away pointer */ |
1090 | memcpy(__UNVOLATILE(&sc->sc_rxbufs[nb].rb_vaddrlo), &sd, |
1091 | sizeof(sd)); |
1092 | if (sd == NULL) |
1093 | break; |
1094 | |
1095 | MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); |
1096 | if (sd->sd_mbuf == NULL) { |
1097 | goto bail_rxbufring; |
1098 | } |
1099 | |
1100 | MCLGET(sd->sd_mbuf, M_DONTWAIT); |
1101 | if ((sd->sd_mbuf->m_flags & M_EXT) == 0) { |
1102 | goto bail_rxbufring; |
1103 | } |
1104 | sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; |
1105 | m_set_rcvif(sd->sd_mbuf, ifp); |
1106 | if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1, |
1107 | TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) { |
1108 | goto bail_rxbufring; |
1109 | } |
1110 | if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf, |
1111 | BUS_DMA_NOWAIT)) { |
1112 | bus_dmamap_destroy(sc->sc_dmat, sd->sd_map); |
1113 | goto bail_rxbufring; |
1114 | } |
1115 | bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, |
1116 | sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
1117 | |
1118 | |
1119 | sc->sc_rxbufs[nb].rb_paddrlo = |
1120 | ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff; |
1121 | sc->sc_rxbufs[nb].rb_paddrhi = |
1122 | ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32; |
1123 | } |
1124 | bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map, |
1125 | 0, sc->sc_rxbufring_dma.dma_map->dm_mapsize, |
1126 | BUS_DMASYNC_PREWRITE); |
1127 | sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) * |
1128 | sizeof(struct txp_rxbuf_desc)); |
1129 | |
1130 | /* zero dma */ |
1131 | if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma, |
1132 | BUS_DMA_COHERENT)) { |
		printf(": can't allocate zero dma\n");
1134 | goto bail_rxbufring; |
1135 | } |
1136 | memset(sc->sc_zero_dma.dma_vaddr, 0, sizeof(u_int32_t)); |
1137 | boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff); |
1138 | boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32); |
1139 | |
1140 | /* See if it's waiting for boot, and try to boot it */ |
1141 | for (i = 0; i < 10000; i++) { |
1142 | r = READ_REG(sc, TXP_A2H_0); |
1143 | if (r == STAT_WAITING_FOR_BOOT) |
1144 | break; |
1145 | DELAY(50); |
1146 | } |
1147 | if (r != STAT_WAITING_FOR_BOOT) { |
		printf(": not waiting for boot\n");
1149 | goto bail; |
1150 | } |
1151 | WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32); |
1152 | WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff); |
1153 | WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD); |
1154 | |
1155 | /* See if it booted */ |
1156 | for (i = 0; i < 10000; i++) { |
1157 | r = READ_REG(sc, TXP_A2H_0); |
1158 | if (r == STAT_RUNNING) |
1159 | break; |
1160 | DELAY(50); |
1161 | } |
1162 | if (r != STAT_RUNNING) { |
		printf(": fw not running\n");
1164 | goto bail; |
1165 | } |
1166 | |
1167 | /* Clear TX and CMD ring write registers */ |
1168 | WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL); |
1169 | WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL); |
1170 | WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL); |
1171 | WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL); |
1172 | |
1173 | return (0); |
1174 | |
1175 | bail: |
1176 | txp_dma_free(sc, &sc->sc_zero_dma); |
1177 | bail_rxbufring: |
1178 | if (nb == RXBUF_ENTRIES) |
1179 | nb--; |
1180 | for (i = 0; i <= nb; i++) { |
1181 | memcpy(&sd, __UNVOLATILE(&sc->sc_rxbufs[i].rb_vaddrlo), |
1182 | sizeof(sd)); |
1183 | if (sd) |
1184 | free(sd, M_DEVBUF); |
1185 | } |
1186 | txp_dma_free(sc, &sc->sc_rxbufring_dma); |
1187 | bail_rspring: |
1188 | txp_dma_free(sc, &sc->sc_rspring_dma); |
1189 | bail_cmdring: |
1190 | txp_dma_free(sc, &sc->sc_cmdring_dma); |
1191 | bail_rxloring: |
1192 | txp_dma_free(sc, &sc->sc_rxloring_dma); |
1193 | bail_rxhiring: |
1194 | txp_dma_free(sc, &sc->sc_rxhiring_dma); |
1195 | bail_txloring: |
1196 | txp_dma_free(sc, &sc->sc_txloring_dma); |
1197 | bail_txhiring: |
1198 | txp_dma_free(sc, &sc->sc_txhiring_dma); |
1199 | bail_host: |
1200 | txp_dma_free(sc, &sc->sc_host_dma); |
1201 | bail_boot: |
1202 | txp_dma_free(sc, &sc->sc_boot_dma); |
1203 | return (-1); |
1204 | } |
1205 | |
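/*
 * Allocate a physically contiguous DMA area and map it, so that both
 * its kernel virtual address and its bus address are available.
 */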
1206 | int |
1207 | txp_dma_malloc(struct txp_softc *sc, bus_size_t size, |
1208 | struct txp_dma_alloc *dma, int mapflags) |
1209 | { |
1210 | int r; |
1211 | |
1212 | if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, |
1213 | &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0) |
1214 | goto fail_0; |
1215 | |
1216 | if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, |
1217 | size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0) |
1218 | goto fail_1; |
1219 | |
1220 | if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, |
1221 | BUS_DMA_NOWAIT, &dma->dma_map)) != 0) |
1222 | goto fail_2; |
1223 | |
1224 | if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, |
1225 | size, NULL, BUS_DMA_NOWAIT)) != 0) |
1226 | goto fail_3; |
1227 | |
1228 | dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; |
1229 | return (0); |
1230 | |
1231 | fail_3: |
1232 | bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); |
1233 | fail_2: |
1234 | bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size); |
1235 | fail_1: |
1236 | bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); |
1237 | fail_0: |
1238 | return (r); |
1239 | } |
1240 | |
1241 | void |
1242 | txp_dma_free(struct txp_softc *sc, struct txp_dma_alloc *dma) |
1243 | { |
1244 | bus_dmamap_unload(sc->sc_dmat, dma->dma_map); |
1245 | bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize); |
1246 | bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); |
1247 | bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); |
1248 | } |
1249 | |
1250 | int |
1251 | txp_ioctl(struct ifnet *ifp, u_long command, void *data) |
1252 | { |
1253 | struct txp_softc *sc = ifp->if_softc; |
1254 | struct ifreq *ifr = (struct ifreq *)data; |
1255 | struct ifaddr *ifa = (struct ifaddr *)data; |
1256 | int s, error = 0; |
1257 | |
1258 | s = splnet(); |
1259 | |
1260 | #if 0 |
1261 | if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) { |
1262 | splx(s); |
1263 | return error; |
1264 | } |
1265 | #endif |
1266 | |
	switch (command) {
1268 | case SIOCINITIFADDR: |
1269 | ifp->if_flags |= IFF_UP; |
1270 | txp_init(sc); |
1271 | switch (ifa->ifa_addr->sa_family) { |
1272 | #ifdef INET |
1273 | case AF_INET: |
1274 | arp_ifinit(ifp, ifa); |
1275 | break; |
1276 | #endif /* INET */ |
1277 | default: |
1278 | break; |
1279 | } |
1280 | break; |
1281 | case SIOCSIFFLAGS: |
1282 | if ((error = ifioctl_common(ifp, command, data)) != 0) |
1283 | break; |
1284 | if (ifp->if_flags & IFF_UP) { |
1285 | txp_init(sc); |
1286 | } else { |
1287 | if (ifp->if_flags & IFF_RUNNING) |
1288 | txp_stop(sc); |
1289 | } |
1290 | break; |
1291 | case SIOCADDMULTI: |
1292 | case SIOCDELMULTI: |
1293 | if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) |
1294 | break; |
1295 | |
1296 | error = 0; |
1297 | |
1298 | if (command != SIOCADDMULTI && command != SIOCDELMULTI) |
1299 | ; |
1300 | else if (ifp->if_flags & IFF_RUNNING) { |
1301 | /* |
1302 | * Multicast list has changed; set the hardware |
1303 | * filter accordingly. |
1304 | */ |
1305 | txp_set_filter(sc); |
1306 | } |
1307 | break; |
1308 | case SIOCGIFMEDIA: |
1309 | case SIOCSIFMEDIA: |
1310 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command); |
1311 | break; |
1312 | default: |
1313 | error = ether_ioctl(ifp, command, data); |
1314 | break; |
1315 | } |
1316 | |
1317 | splx(s); |
1318 | |
	return (error);
1320 | } |
1321 | |
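/*
 * Bring the interface up: program the receive filter, enable tx and rx
 * on the adapter, and unmask the interrupts the driver services.
 */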
1322 | void |
1323 | txp_init(struct txp_softc *sc) |
1324 | { |
1325 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
1326 | int s; |
1327 | |
1328 | txp_stop(sc); |
1329 | |
1330 | s = splnet(); |
1331 | |
1332 | txp_set_filter(sc); |
1333 | |
1334 | txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); |
1335 | txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); |
1336 | |
1337 | WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF | |
1338 | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | |
1339 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | |
1340 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | |
1341 | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); |
1342 | WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); |
1343 | |
1344 | ifp->if_flags |= IFF_RUNNING; |
1345 | ifp->if_flags &= ~IFF_OACTIVE; |
1346 | ifp->if_timer = 0; |
1347 | |
1348 | if (!callout_pending(&sc->sc_tick)) |
1349 | callout_schedule(&sc->sc_tick, hz); |
1350 | |
1351 | splx(s); |
1352 | } |
1353 | |
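/*
 * Once-a-second timer: replenish receive buffers and fold the adapter's
 * statistics counters into the interface counters.
 */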
1354 | void |
1355 | txp_tick(void *vsc) |
1356 | { |
1357 | struct txp_softc *sc = vsc; |
1358 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
1359 | struct txp_rsp_desc *rsp = NULL; |
1360 | struct txp_ext_desc *ext; |
1361 | int s; |
1362 | |
1363 | s = splnet(); |
1364 | txp_rxbuf_reclaim(sc); |
1365 | |
1366 | if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0, |
1367 | &rsp, 1)) |
1368 | goto out; |
1369 | if (rsp->rsp_numdesc != 6) |
1370 | goto out; |
1371 | if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0, |
1372 | NULL, NULL, NULL, 1)) |
1373 | goto out; |
1374 | ext = (struct txp_ext_desc *)(rsp + 1); |
1375 | |
1376 | ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 + |
1377 | ext[4].ext_1 + ext[4].ext_4; |
1378 | ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 + |
1379 | ext[2].ext_1; |
1380 | ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 + |
1381 | ext[1].ext_3; |
1382 | ifp->if_opackets += rsp->rsp_par2; |
1383 | ifp->if_ipackets += ext[2].ext_3; |
1384 | |
1385 | out: |
1386 | if (rsp != NULL) |
1387 | free(rsp, M_DEVBUF); |
1388 | |
1389 | splx(s); |
1390 | callout_schedule(&sc->sc_tick, hz); |
1391 | } |
1392 | |
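/*
 * Transmit: DMA-map each queued packet (copying it into a fresh mbuf if
 * the map rejects it), write a data descriptor followed by one fragment
 * descriptor per DMA segment, then kick the ring's doorbell register.
 */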
1393 | void |
1394 | txp_start(struct ifnet *ifp) |
1395 | { |
1396 | struct txp_softc *sc = ifp->if_softc; |
1397 | struct txp_tx_ring *r = &sc->sc_txhir; |
1398 | struct txp_tx_desc *txd; |
1399 | int txdidx; |
1400 | struct txp_frag_desc *fxd; |
1401 | struct mbuf *m, *mnew; |
1402 | struct txp_swdesc *sd; |
1403 | u_int32_t firstprod, firstcnt, prod, cnt, i; |
1404 | struct m_tag *mtag; |
1405 | |
1406 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
1407 | return; |
1408 | |
1409 | prod = r->r_prod; |
1410 | cnt = r->r_cnt; |
1411 | |
1412 | while (1) { |
1413 | IFQ_POLL(&ifp->if_snd, m); |
1414 | if (m == NULL) |
1415 | break; |
1416 | mnew = NULL; |
1417 | |
1418 | firstprod = prod; |
1419 | firstcnt = cnt; |
1420 | |
1421 | sd = sc->sc_txd + prod; |
1422 | sd->sd_mbuf = m; |
1423 | |
1424 | if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m, |
1425 | BUS_DMA_NOWAIT)) { |
1426 | MGETHDR(mnew, M_DONTWAIT, MT_DATA); |
1427 | if (mnew == NULL) |
1428 | goto oactive1; |
1429 | if (m->m_pkthdr.len > MHLEN) { |
1430 | MCLGET(mnew, M_DONTWAIT); |
1431 | if ((mnew->m_flags & M_EXT) == 0) { |
1432 | m_freem(mnew); |
1433 | goto oactive1; |
1434 | } |
1435 | } |
1436 | m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, void *)); |
1437 | mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len; |
1438 | IFQ_DEQUEUE(&ifp->if_snd, m); |
1439 | m_freem(m); |
1440 | m = mnew; |
1441 | if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m, |
1442 | BUS_DMA_NOWAIT)) |
1443 | goto oactive1; |
1444 | } |
1445 | |
1446 | if ((TX_ENTRIES - cnt) < 4) |
1447 | goto oactive; |
1448 | |
1449 | txd = r->r_desc + prod; |
1450 | txdidx = prod; |
1451 | txd->tx_flags = TX_FLAGS_TYPE_DATA; |
1452 | txd->tx_numdesc = 0; |
1453 | txd->tx_addrlo = 0; |
1454 | txd->tx_addrhi = 0; |
1455 | txd->tx_totlen = m->m_pkthdr.len; |
1456 | txd->tx_pflags = 0; |
1457 | txd->tx_numdesc = sd->sd_map->dm_nsegs; |
1458 | |
1459 | if (++prod == TX_ENTRIES) |
1460 | prod = 0; |
1461 | |
1462 | if (++cnt >= (TX_ENTRIES - 4)) |
1463 | goto oactive; |
1464 | |
1465 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_arpcom, m))) |
1466 | txd->tx_pflags = TX_PFLAGS_VLAN | |
1467 | (htons(VLAN_TAG_VALUE(mtag)) << TX_PFLAGS_VLANTAG_S); |
1468 | |
1469 | if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) |
1470 | txd->tx_pflags |= TX_PFLAGS_IPCKSUM; |
1471 | #ifdef TRY_TX_TCP_CSUM |
1472 | if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4) |
1473 | txd->tx_pflags |= TX_PFLAGS_TCPCKSUM; |
1474 | #endif |
1475 | #ifdef TRY_TX_UDP_CSUM |
1476 | if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) |
1477 | txd->tx_pflags |= TX_PFLAGS_UDPCKSUM; |
1478 | #endif |
1479 | |
1480 | bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, |
1481 | sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1482 | |
1483 | fxd = (struct txp_frag_desc *)(r->r_desc + prod); |
1484 | for (i = 0; i < sd->sd_map->dm_nsegs; i++) { |
1485 | if (++cnt >= (TX_ENTRIES - 4)) { |
1486 | bus_dmamap_sync(sc->sc_dmat, sd->sd_map, |
1487 | 0, sd->sd_map->dm_mapsize, |
1488 | BUS_DMASYNC_POSTWRITE); |
1489 | goto oactive; |
1490 | } |
1491 | |
1492 | fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | |
1493 | FRAG_FLAGS_VALID; |
1494 | fxd->frag_rsvd1 = 0; |
1495 | fxd->frag_len = sd->sd_map->dm_segs[i].ds_len; |
1496 | fxd->frag_addrlo = |
1497 | ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) & |
1498 | 0xffffffff; |
1499 | fxd->frag_addrhi = |
1500 | ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >> |
1501 | 32; |
1502 | fxd->frag_rsvd2 = 0; |
1503 | |
1504 | bus_dmamap_sync(sc->sc_dmat, |
1505 | sc->sc_txhiring_dma.dma_map, |
1506 | prod * sizeof(struct txp_frag_desc), |
1507 | sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE); |
1508 | |
1509 | if (++prod == TX_ENTRIES) { |
1510 | fxd = (struct txp_frag_desc *)r->r_desc; |
1511 | prod = 0; |
1512 | } else |
1513 | fxd++; |
1514 | |
1515 | } |
1516 | |
1517 | /* |
1518 | * if mnew isn't NULL, we already dequeued and copied |
1519 | * the packet. |
1520 | */ |
1521 | if (mnew == NULL) |
1522 | IFQ_DEQUEUE(&ifp->if_snd, m); |
1523 | |
1524 | ifp->if_timer = 5; |
1525 | |
1526 | bpf_mtap(ifp, m); |
1527 | |
1528 | txd->tx_flags |= TX_FLAGS_VALID; |
1529 | bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map, |
1530 | txdidx * sizeof(struct txp_tx_desc), |
1531 | sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE); |
1532 | |
1533 | #if 0 |
1534 | { |
1535 | struct mbuf *mx; |
1536 | int i; |
1537 | |
		printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
1539 | txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, |
1540 | txd->tx_pflags); |
1541 | for (mx = m; mx != NULL; mx = mx->m_next) { |
1542 | for (i = 0; i < mx->m_len; i++) { |
				printf(":%02x",
				    (u_int8_t)mx->m_data[i]);
1545 | } |
1546 | } |
		printf("\n");
1548 | } |
1549 | #endif |
1550 | |
1551 | WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod)); |
1552 | } |
1553 | |
1554 | r->r_prod = prod; |
1555 | r->r_cnt = cnt; |
1556 | return; |
1557 | |
1558 | oactive: |
1559 | bus_dmamap_unload(sc->sc_dmat, sd->sd_map); |
1560 | oactive1: |
1561 | ifp->if_flags |= IFF_OACTIVE; |
1562 | r->r_prod = firstprod; |
1563 | r->r_cnt = firstcnt; |
1564 | } |
1565 | |
1566 | /* |
1567 | * Handle simple commands sent to the typhoon |
1568 | */ |
1569 | int |
1570 | txp_command(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2, |
1571 | u_int32_t in3, u_int16_t *out1, u_int32_t *out2, u_int32_t *out3, int wait) |
1572 | { |
1573 | struct txp_rsp_desc *rsp = NULL; |
1574 | |
1575 | if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait)) |
1576 | return (-1); |
1577 | |
1578 | if (!wait) |
1579 | return (0); |
1580 | |
1581 | if (out1 != NULL) |
1582 | *out1 = le16toh(rsp->rsp_par1); |
1583 | if (out2 != NULL) |
1584 | *out2 = le32toh(rsp->rsp_par2); |
1585 | if (out3 != NULL) |
1586 | *out3 = le32toh(rsp->rsp_par3); |
1587 | free(rsp, M_DEVBUF); |
1588 | return (0); |
1589 | } |
1590 | |
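/*
 * Queue a command descriptor (plus any extension descriptors) on the
 * command ring; if a response was requested, poll the response ring
 * until the matching id/sequence pair arrives.
 */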
1591 | int |
1592 | txp_command2(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2, |
1593 | u_int32_t in3, struct txp_ext_desc *in_extp, u_int8_t in_extn, |
1594 | struct txp_rsp_desc **rspp, int wait) |
1595 | { |
1596 | struct txp_hostvar *hv = sc->sc_hostvar; |
1597 | struct txp_cmd_desc *cmd; |
1598 | struct txp_ext_desc *ext; |
1599 | u_int32_t idx, i; |
1600 | u_int16_t seq; |
1601 | |
1602 | if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) { |
		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
1604 | return (-1); |
1605 | } |
1606 | |
1607 | idx = sc->sc_cmdring.lastwrite; |
1608 | cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); |
1609 | memset(cmd, 0, sizeof(*cmd)); |
1610 | |
1611 | cmd->cmd_numdesc = in_extn; |
1612 | seq = sc->sc_seq++; |
1613 | cmd->cmd_seq = htole16(seq); |
1614 | cmd->cmd_id = htole16(id); |
1615 | cmd->cmd_par1 = htole16(in1); |
1616 | cmd->cmd_par2 = htole32(in2); |
1617 | cmd->cmd_par3 = htole32(in3); |
1618 | cmd->cmd_flags = CMD_FLAGS_TYPE_CMD | |
1619 | (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID; |
1620 | |
1621 | idx += sizeof(struct txp_cmd_desc); |
1622 | if (idx == sc->sc_cmdring.size) |
1623 | idx = 0; |
1624 | |
1625 | for (i = 0; i < in_extn; i++) { |
1626 | ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); |
1627 | memcpy(ext, in_extp, sizeof(struct txp_ext_desc)); |
1628 | in_extp++; |
1629 | idx += sizeof(struct txp_cmd_desc); |
1630 | if (idx == sc->sc_cmdring.size) |
1631 | idx = 0; |
1632 | } |
1633 | |
1634 | sc->sc_cmdring.lastwrite = idx; |
1635 | |
1636 | WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite); |
1637 | bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, |
1638 | sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD); |
1639 | |
1640 | if (!wait) |
1641 | return (0); |
1642 | |
1643 | for (i = 0; i < 10000; i++) { |
1644 | bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, |
1645 | sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD); |
1646 | idx = le32toh(hv->hv_resp_read_idx); |
1647 | if (idx != le32toh(hv->hv_resp_write_idx)) { |
1648 | *rspp = NULL; |
1649 | if (txp_response(sc, idx, id, seq, rspp)) |
1650 | return (-1); |
1651 | if (*rspp != NULL) |
1652 | break; |
1653 | } |
1654 | bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, |
1655 | sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD); |
1656 | DELAY(50); |
1657 | } |
	if (i == 10000 || (*rspp) == NULL) {
		printf("%s: command 0x%x failed\n", TXP_DEVNAME(sc), id);
1660 | return (-1); |
1661 | } |
1662 | |
1663 | return (0); |
1664 | } |
1665 | |
1666 | int |
1667 | txp_response(struct txp_softc *sc, u_int32_t ridx, u_int16_t id, u_int16_t seq, |
1668 | struct txp_rsp_desc **rspp) |
1669 | { |
1670 | struct txp_hostvar *hv = sc->sc_hostvar; |
1671 | struct txp_rsp_desc *rsp; |
1672 | |
1673 | while (ridx != le32toh(hv->hv_resp_write_idx)) { |
		rsp = (struct txp_rsp_desc *)
		    (((u_int8_t *)sc->sc_rspring.base) + ridx);
1675 | |
1676 | if (id == le16toh(rsp->rsp_id) && le16toh(rsp->rsp_seq) == seq) { |
1677 | *rspp = (struct txp_rsp_desc *)malloc( |
1678 | sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1), |
1679 | M_DEVBUF, M_NOWAIT); |
1680 | if ((*rspp) == NULL) |
1681 | return (-1); |
1682 | txp_rsp_fixup(sc, rsp, *rspp); |
1683 | return (0); |
1684 | } |
1685 | |
1686 | if (rsp->rsp_flags & RSP_FLAGS_ERROR) { |
1687 | printf("%s: response error: id 0x%x\n" , |
1688 | TXP_DEVNAME(sc), le16toh(rsp->rsp_id)); |
1689 | txp_rsp_fixup(sc, rsp, NULL); |
1690 | ridx = le32toh(hv->hv_resp_read_idx); |
1691 | continue; |
1692 | } |
1693 | |
1694 | switch (le16toh(rsp->rsp_id)) { |
1695 | case TXP_CMD_CYCLE_STATISTICS: |
1696 | case TXP_CMD_MEDIA_STATUS_READ: |
1697 | break; |
1698 | case TXP_CMD_HELLO_RESPONSE: |
1699 | printf("%s: hello\n" , TXP_DEVNAME(sc)); |
1700 | break; |
1701 | default: |
1702 | printf("%s: unknown id(0x%x)\n" , TXP_DEVNAME(sc), |
1703 | le16toh(rsp->rsp_id)); |
1704 | } |
1705 | |
1706 | txp_rsp_fixup(sc, rsp, NULL); |
		ridx = le32toh(hv->hv_resp_read_idx);
1709 | } |
1710 | |
1711 | return (0); |
1712 | } |
1713 | |
1714 | void |
1715 | txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp, |
1716 | struct txp_rsp_desc *dst) |
1717 | { |
1718 | struct txp_rsp_desc *src = rsp; |
1719 | struct txp_hostvar *hv = sc->sc_hostvar; |
1720 | u_int32_t i, ridx; |
1721 | |
1722 | ridx = le32toh(hv->hv_resp_read_idx); |
1723 | |
1724 | for (i = 0; i < rsp->rsp_numdesc + 1; i++) { |
1725 | if (dst != NULL) |
1726 | memcpy(dst++, src, sizeof(struct txp_rsp_desc)); |
1727 | ridx += sizeof(struct txp_rsp_desc); |
1728 | if (ridx == sc->sc_rspring.size) { |
1729 | src = sc->sc_rspring.base; |
1730 | ridx = 0; |
1731 | } else |
1732 | src++; |
1733 | sc->sc_rspring.lastwrite = ridx; |
1734 | hv->hv_resp_read_idx = htole32(ridx); |
1735 | } |
1736 | |
1737 | hv->hv_resp_read_idx = htole32(ridx); |
1738 | } |
1739 | |
1740 | int |
1741 | txp_cmd_desc_numfree(struct txp_softc *sc) |
1742 | { |
1743 | struct txp_hostvar *hv = sc->sc_hostvar; |
1744 | struct txp_boot_record *br = sc->sc_boot; |
1745 | u_int32_t widx, ridx, nfree; |
1746 | |
1747 | widx = sc->sc_cmdring.lastwrite; |
1748 | ridx = le32toh(hv->hv_cmd_read_idx); |
1749 | |
1750 | if (widx == ridx) { |
1751 | /* Ring is completely free */ |
1752 | nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc); |
1753 | } else { |
1754 | if (widx > ridx) |
1755 | nfree = le32toh(br->br_cmd_siz) - |
1756 | (widx - ridx + sizeof(struct txp_cmd_desc)); |
1757 | else |
1758 | nfree = ridx - widx - sizeof(struct txp_cmd_desc); |
1759 | } |
1760 | |
1761 | return (nfree / sizeof(struct txp_cmd_desc)); |
1762 | } |
1763 | |
1764 | void |
1765 | txp_stop(struct txp_softc *sc) |
1766 | { |
1767 | txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); |
1768 | txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); |
1769 | |
1770 | if (callout_pending(&sc->sc_tick)) |
1771 | callout_stop(&sc->sc_tick); |
1772 | } |
1773 | |
1774 | void |
1775 | txp_watchdog(struct ifnet *ifp) |
1776 | { |
1777 | } |
1778 | |
1779 | int |
1780 | txp_ifmedia_upd(struct ifnet *ifp) |
1781 | { |
1782 | struct txp_softc *sc = ifp->if_softc; |
1783 | struct ifmedia *ifm = &sc->sc_ifmedia; |
1784 | u_int16_t new_xcvr; |
1785 | |
1786 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
1787 | return (EINVAL); |
1788 | |
1789 | if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { |
1790 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) |
1791 | new_xcvr = TXP_XCVR_10_FDX; |
1792 | else |
1793 | new_xcvr = TXP_XCVR_10_HDX; |
1794 | } else if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) || |
1795 | (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX)) { |
1796 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) |
1797 | new_xcvr = TXP_XCVR_100_FDX; |
1798 | else |
1799 | new_xcvr = TXP_XCVR_100_HDX; |
1800 | } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { |
1801 | new_xcvr = TXP_XCVR_AUTO; |
1802 | } else |
1803 | return (EINVAL); |
1804 | |
1805 | /* nothing to do */ |
1806 | if (sc->sc_xcvr == new_xcvr) |
1807 | return (0); |
1808 | |
1809 | txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0, |
1810 | NULL, NULL, NULL, 0); |
1811 | sc->sc_xcvr = new_xcvr; |
1812 | |
1813 | return (0); |
1814 | } |
1815 | |
1816 | void |
1817 | txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
1818 | { |
1819 | struct txp_softc *sc = ifp->if_softc; |
1820 | struct ifmedia *ifm = &sc->sc_ifmedia; |
1821 | u_int16_t bmsr, bmcr, anlpar; |
1822 | |
1823 | ifmr->ifm_status = IFM_AVALID; |
1824 | ifmr->ifm_active = IFM_ETHER; |
1825 | |
	/*
	 * BMSR_LINK is latched low, so read BMSR twice; the second
	 * read reflects the current link state.
	 */
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
1832 | |
1833 | if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0, |
1834 | &bmcr, NULL, NULL, 1)) |
1835 | goto bail; |
1836 | |
1837 | if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0, |
1838 | &anlpar, NULL, NULL, 1)) |
1839 | goto bail; |
1840 | |
1841 | if (bmsr & BMSR_LINK) |
1842 | ifmr->ifm_status |= IFM_ACTIVE; |
1843 | |
1844 | if (bmcr & BMCR_ISO) { |
1845 | ifmr->ifm_active |= IFM_NONE; |
1846 | ifmr->ifm_status = 0; |
1847 | return; |
1848 | } |
1849 | |
1850 | if (bmcr & BMCR_LOOP) |
1851 | ifmr->ifm_active |= IFM_LOOP; |
1852 | |
1853 | if (!(sc->sc_flags & TXP_FIBER) && (bmcr & BMCR_AUTOEN)) { |
1854 | if ((bmsr & BMSR_ACOMP) == 0) { |
1855 | ifmr->ifm_active |= IFM_NONE; |
1856 | return; |
1857 | } |
1858 | |
1859 | if (anlpar & ANLPAR_TX_FD) |
1860 | ifmr->ifm_active |= IFM_100_TX|IFM_FDX; |
1861 | else if (anlpar & ANLPAR_T4) |
1862 | ifmr->ifm_active |= IFM_100_T4|IFM_HDX; |
1863 | else if (anlpar & ANLPAR_TX) |
1864 | ifmr->ifm_active |= IFM_100_TX|IFM_HDX; |
1865 | else if (anlpar & ANLPAR_10_FD) |
1866 | ifmr->ifm_active |= IFM_10_T|IFM_FDX; |
1867 | else if (anlpar & ANLPAR_10) |
1868 | ifmr->ifm_active |= IFM_10_T|IFM_HDX; |
1869 | else |
1870 | ifmr->ifm_active |= IFM_NONE; |
1871 | } else |
1872 | ifmr->ifm_active = ifm->ifm_cur->ifm_media; |
1873 | return; |
1874 | |
1875 | bail: |
1876 | ifmr->ifm_active |= IFM_NONE; |
1877 | ifmr->ifm_status &= ~IFM_AVALID; |
1878 | } |
1879 | |
1880 | void |
1881 | txp_show_descriptor(void *d) |
1882 | { |
1883 | struct txp_cmd_desc *cmd = d; |
1884 | struct txp_rsp_desc *rsp = d; |
1885 | struct txp_tx_desc *txd = d; |
1886 | struct txp_frag_desc *frgd = d; |
1887 | |
1888 | switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) { |
1889 | case CMD_FLAGS_TYPE_CMD: |
1890 | /* command descriptor */ |
1891 | printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n" , |
1892 | cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id), |
1893 | le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1), |
1894 | le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3)); |
1895 | break; |
1896 | case CMD_FLAGS_TYPE_RESP: |
1897 | /* response descriptor */ |
1898 | printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n" , |
1899 | rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id), |
1900 | le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1), |
1901 | le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3)); |
1902 | break; |
1903 | case CMD_FLAGS_TYPE_DATA: |
1904 | /* data header (assuming tx for now) */ |
1905 | printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]" , |
1906 | txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, |
1907 | txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags); |
1908 | break; |
1909 | case CMD_FLAGS_TYPE_FRAG: |
1910 | /* fragment descriptor */ |
1911 | printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]" , |
1912 | frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len, |
1913 | frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2); |
1914 | break; |
1915 | default: |
1916 | printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n" , |
1917 | cmd->cmd_flags & CMD_FLAGS_TYPE_M, |
1918 | cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id), |
1919 | le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1), |
1920 | le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3)); |
1921 | break; |
1922 | } |
1923 | } |
1924 | |
1925 | void |
1926 | txp_set_filter(struct txp_softc *sc) |
1927 | { |
1928 | struct ethercom *ac = &sc->sc_arpcom; |
1929 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
1930 | u_int32_t crc, carry, hashbit, hash[2]; |
1931 | u_int16_t filter; |
1932 | u_int8_t octet; |
1933 | int i, j, mcnt = 0; |
1934 | struct ether_multi *enm; |
1935 | struct ether_multistep step; |
1936 | |
1937 | if (ifp->if_flags & IFF_PROMISC) { |
1938 | filter = TXP_RXFILT_PROMISC; |
1939 | goto setit; |
1940 | } |
1941 | |
1942 | again: |
1943 | filter = TXP_RXFILT_DIRECT; |
1944 | |
1945 | if (ifp->if_flags & IFF_BROADCAST) |
1946 | filter |= TXP_RXFILT_BROADCAST; |
1947 | |
1948 | if (ifp->if_flags & IFF_ALLMULTI) |
1949 | filter |= TXP_RXFILT_ALLMULTI; |
1950 | else { |
1951 | hash[0] = hash[1] = 0; |
1952 | |
1953 | ETHER_FIRST_MULTI(step, ac, enm); |
1954 | while (enm != NULL) { |
1955 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, |
1956 | ETHER_ADDR_LEN)) { |
1957 | /* |
1958 | * We must listen to a range of multicast |
1959 | * addresses. For now, just accept all |
1960 | * multicasts, rather than trying to set only |
1961 | * those filter bits needed to match the range. |
1962 | * (At this time, the only use of address |
1963 | * ranges is for IP multicast routing, for |
1964 | * which the range is big enough to require |
1965 | * all bits set.) |
1966 | */ |
1967 | ifp->if_flags |= IFF_ALLMULTI; |
1968 | goto again; |
1969 | } |
1970 | |
1971 | mcnt++; |
1972 | crc = 0xffffffff; |
1973 | |
1974 | for (i = 0; i < ETHER_ADDR_LEN; i++) { |
1975 | octet = enm->enm_addrlo[i]; |
1976 | for (j = 0; j < 8; j++) { |
1977 | carry = ((crc & 0x80000000) ? 1 : 0) ^ |
1978 | (octet & 1); |
1979 | crc <<= 1; |
1980 | octet >>= 1; |
1981 | if (carry) |
1982 | crc = (crc ^ TXP_POLYNOMIAL) | |
1983 | carry; |
1984 | } |
1985 | } |
1986 | hashbit = (u_int16_t)(crc & (64 - 1)); |
1987 | hash[hashbit / 32] |= (1 << hashbit % 32); |
1988 | ETHER_NEXT_MULTI(step, enm); |
1989 | } |
1990 | |
1991 | if (mcnt > 0) { |
1992 | filter |= TXP_RXFILT_HASHMULTI; |
1993 | txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, |
1994 | 2, hash[0], hash[1], NULL, NULL, NULL, 0); |
1995 | } |
1996 | } |
1997 | |
1998 | setit: |
1999 | txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0, |
2000 | NULL, NULL, NULL, 1); |
2001 | } |
2002 | |
2003 | void |
2004 | txp_capabilities(struct txp_softc *sc) |
2005 | { |
2006 | struct ifnet *ifp = &sc->sc_arpcom.ec_if; |
2007 | struct txp_rsp_desc *rsp = NULL; |
2008 | struct txp_ext_desc *ext; |
2009 | |
2010 | if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1)) |
2011 | goto out; |
2012 | |
2013 | if (rsp->rsp_numdesc != 1) |
2014 | goto out; |
2015 | ext = (struct txp_ext_desc *)(rsp + 1); |
2016 | |
2017 | sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK; |
2018 | sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK; |
2019 | |
2020 | sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
2021 | if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) { |
2022 | sc->sc_tx_capability |= OFFLOAD_VLAN; |
2023 | sc->sc_rx_capability |= OFFLOAD_VLAN; |
2024 | sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; |
2025 | } |
2026 | |
2027 | #if 0 |
2028 | /* not ready yet */ |
2029 | if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) { |
2030 | sc->sc_tx_capability |= OFFLOAD_IPSEC; |
2031 | sc->sc_rx_capability |= OFFLOAD_IPSEC; |
2032 | ifp->if_capabilities |= IFCAP_IPSEC; |
2033 | } |
2034 | #endif |
2035 | |
2036 | if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) { |
2037 | sc->sc_tx_capability |= OFFLOAD_IPCKSUM; |
2038 | sc->sc_rx_capability |= OFFLOAD_IPCKSUM; |
2039 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; |
2040 | } |
2041 | |
2042 | if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) { |
2043 | sc->sc_rx_capability |= OFFLOAD_TCPCKSUM; |
2044 | #ifdef TRY_TX_TCP_CSUM |
2045 | sc->sc_tx_capability |= OFFLOAD_TCPCKSUM; |
2046 | ifp->if_capabilities |= |
2047 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx; |
2048 | #endif |
2049 | } |
2050 | |
2051 | if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) { |
2052 | sc->sc_rx_capability |= OFFLOAD_UDPCKSUM; |
2053 | #ifdef TRY_TX_UDP_CSUM |
2054 | sc->sc_tx_capability |= OFFLOAD_UDPCKSUM; |
2055 | ifp->if_capabilities |= |
2056 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
2057 | #endif |
2058 | } |
2059 | |
2060 | if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, |
2061 | sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1)) |
2062 | goto out; |
2063 | |
2064 | out: |
2065 | if (rsp != NULL) |
2066 | free(rsp, M_DEVBUF); |
2067 | } |
2068 | |