1 | /* $NetBSD: if_bge.c,v 1.298 2016/07/11 06:14:51 knakahara Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2001 Wind River Systems |
5 | * Copyright (c) 1997, 1998, 1999, 2001 |
6 | * Bill Paul <wpaul@windriver.com>. All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions |
10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. All advertising materials mentioning features or use of this software |
17 | * must display the following acknowledgement: |
18 | * This product includes software developed by Bill Paul. |
19 | * 4. Neither the name of the author nor the names of any co-contributors |
20 | * may be used to endorse or promote products derived from this software |
21 | * without specific prior written permission. |
22 | * |
23 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND |
24 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
26 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD |
27 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
30 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
31 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
32 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
33 | * THE POSSIBILITY OF SUCH DAMAGE. |
34 | * |
35 | * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ |
36 | */ |
37 | |
38 | /* |
39 | * Broadcom BCM570x family gigabit ethernet driver for NetBSD. |
40 | * |
41 | * NetBSD version by: |
42 | * |
43 | * Frank van der Linden <fvdl@wasabisystems.com> |
44 | * Jason Thorpe <thorpej@wasabisystems.com> |
45 | * Jonathan Stone <jonathan@dsg.stanford.edu> |
46 | * |
47 | * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> |
48 | * Senior Engineer, Wind River Systems |
49 | */ |
50 | |
51 | /* |
52 | * The Broadcom BCM5700 is based on technology originally developed by |
53 | * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet |
54 | * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has |
55 | * two on-board MIPS R4000 CPUs and can have as much as 16MB of external |
56 | * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo |
57 | * frames, highly configurable RX filtering, and 16 RX and TX queues |
58 | * (which, along with RX filter rules, can be used for QOS applications). |
59 | * Other features, such as TCP segmentation, may be available as part |
60 | * of value-added firmware updates. Unlike the Tigon I and Tigon II, |
61 | * firmware images can be stored in hardware and need not be compiled |
62 | * into the driver. |
63 | * |
64 | * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will |
65 | * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. |
66 | * |
67 | * The BCM5701 is a single-chip solution incorporating both the BCM5700 |
68 | * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 |
69 | * does not support external SSRAM. |
70 | * |
71 | * Broadcom also produces a variation of the BCM5700 under the "Altima" |
72 | * brand name, which is functionally similar but lacks PCI-X support. |
73 | * |
74 | * Without external SSRAM, you can only have at most 4 TX rings, |
75 | * and the use of the mini RX ring is disabled. This seems to imply |
76 | * that these features are simply not available on the BCM5701. As a |
77 | * result, this driver does not implement any support for the mini RX |
78 | * ring. |
79 | */ |
80 | |
81 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.298 2016/07/11 06:14:51 knakahara Exp $");
83 | |
84 | #include <sys/param.h> |
85 | #include <sys/systm.h> |
86 | #include <sys/callout.h> |
87 | #include <sys/sockio.h> |
88 | #include <sys/mbuf.h> |
89 | #include <sys/malloc.h> |
90 | #include <sys/kernel.h> |
91 | #include <sys/device.h> |
92 | #include <sys/socket.h> |
93 | #include <sys/sysctl.h> |
94 | |
95 | #include <net/if.h> |
96 | #include <net/if_dl.h> |
97 | #include <net/if_media.h> |
98 | #include <net/if_ether.h> |
99 | |
100 | #include <sys/rndsource.h> |
101 | |
102 | #ifdef INET |
103 | #include <netinet/in.h> |
104 | #include <netinet/in_systm.h> |
105 | #include <netinet/in_var.h> |
106 | #include <netinet/ip.h> |
107 | #endif |
108 | |
109 | /* Headers for TCP Segmentation Offload (TSO) */ |
110 | #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */ |
111 | #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */ |
112 | #include <netinet/ip.h> /* for struct ip */ |
113 | #include <netinet/tcp.h> /* for struct tcphdr */ |
114 | |
115 | |
116 | #include <net/bpf.h> |
117 | |
118 | #include <dev/pci/pcireg.h> |
119 | #include <dev/pci/pcivar.h> |
120 | #include <dev/pci/pcidevs.h> |
121 | |
122 | #include <dev/mii/mii.h> |
123 | #include <dev/mii/miivar.h> |
124 | #include <dev/mii/miidevs.h> |
125 | #include <dev/mii/brgphyreg.h> |
126 | |
127 | #include <dev/pci/if_bgereg.h> |
128 | #include <dev/pci/if_bgevar.h> |
129 | |
130 | #include <prop/proplib.h> |
131 | |
132 | #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ |
133 | |
134 | |
135 | /* |
136 | * Tunable thresholds for rx-side bge interrupt mitigation. |
137 | */ |
138 | |
139 | /* |
140 | * The pairs of values below were obtained from empirical measurement |
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
142 | * interrupt for every N packets received, where N is, approximately, |
143 | * the second value (rx_max_bds) in each pair. The values are chosen |
144 | * such that moving from one pair to the succeeding pair was observed |
145 | * to roughly halve interrupt rate under sustained input packet load. |
146 | * The values were empirically chosen to avoid overflowing internal |
147 | * limits on the bcm5700: increasing rx_ticks much beyond 600 |
148 | * results in internal wrapping and higher interrupt rates. |
149 | * The limit of 46 frames was chosen to match NFS workloads. |
150 | * |
151 | * These values also work well on bcm5701, bcm5704C, and (less |
152 | * tested) bcm5703. On other chipsets, (including the Altima chip |
153 | * family), the larger values may overflow internal chip limits, |
154 | * leading to increasing interrupt rates rather than lower interrupt |
155 | * rates. |
156 | * |
157 | * Applications using heavy interrupt mitigation (interrupting every |
158 | * 32 or 46 frames) in both directions may need to increase the TCP |
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
160 | * full link bandwidth, due to ACKs and window updates lingering |
161 | * in the RX queue during the 30-to-40-frame interrupt-mitigation window. |
162 | */ |
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
167 | { 16, 1 }, /* rx_max_bds = 1 disables interrupt mitigation */ |
168 | { 32, 2 }, |
169 | { 50, 4 }, |
170 | { 100, 8 }, |
171 | { 192, 16 }, |
172 | { 416, 32 }, |
173 | { 598, 46 } |
174 | }; |
175 | #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) |
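
/*
 * A level in this table selects one (rx_ticks, rx_max_bds) pair; for
 * example, level 3 asks the chip to interrupt after roughly 100 clock
 * ticks or 8 receive BDs, whichever comes first.  The chosen pair is
 * saved by bge_set_thresh() below and later programmed into the
 * host-coalescing engine when the pending-change flag is consumed.
 */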
176 | |
177 | /* XXX patchable; should be sysctl'able */ |
178 | static int bge_auto_thresh = 1; |
179 | static int bge_rx_thresh_lvl; |
180 | |
181 | static int bge_rxthresh_nodenum; |
182 | |
183 | typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); |
184 | |
185 | static uint32_t bge_chipid(const struct pci_attach_args *); |
186 | static int bge_can_use_msi(struct bge_softc *); |
187 | static int bge_probe(device_t, cfdata_t, void *); |
188 | static void bge_attach(device_t, device_t, void *); |
189 | static int bge_detach(device_t, int); |
190 | static void bge_release_resources(struct bge_softc *); |
191 | |
192 | static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]); |
193 | static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); |
194 | static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); |
195 | static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); |
196 | static int bge_get_eaddr(struct bge_softc *, uint8_t[]); |
197 | |
198 | static void bge_txeof(struct bge_softc *); |
199 | static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); |
200 | static void bge_rxeof(struct bge_softc *); |
201 | |
static void bge_asf_driver_up(struct bge_softc *);
203 | static void bge_tick(void *); |
204 | static void bge_stats_update(struct bge_softc *); |
205 | static void bge_stats_update_regs(struct bge_softc *); |
206 | static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *); |
207 | |
208 | static int bge_intr(void *); |
209 | static void bge_start(struct ifnet *); |
210 | static int bge_ifflags_cb(struct ethercom *); |
211 | static int bge_ioctl(struct ifnet *, u_long, void *); |
212 | static int bge_init(struct ifnet *); |
213 | static void bge_stop(struct ifnet *, int); |
214 | static void bge_watchdog(struct ifnet *); |
215 | static int bge_ifmedia_upd(struct ifnet *); |
216 | static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); |
217 | |
218 | static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); |
219 | static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int); |
220 | |
221 | static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); |
222 | static int bge_read_eeprom(struct bge_softc *, void *, int, int); |
223 | static void bge_setmulti(struct bge_softc *); |
224 | |
225 | static void bge_handle_events(struct bge_softc *); |
226 | static int bge_alloc_jumbo_mem(struct bge_softc *); |
227 | #if 0 /* XXX */ |
228 | static void bge_free_jumbo_mem(struct bge_softc *); |
229 | #endif |
230 | static void *bge_jalloc(struct bge_softc *); |
231 | static void bge_jfree(struct mbuf *, void *, size_t, void *); |
232 | static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, |
233 | bus_dmamap_t); |
234 | static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); |
235 | static int bge_init_rx_ring_std(struct bge_softc *); |
236 | static void bge_free_rx_ring_std(struct bge_softc *); |
237 | static int bge_init_rx_ring_jumbo(struct bge_softc *); |
238 | static void bge_free_rx_ring_jumbo(struct bge_softc *); |
239 | static void bge_free_tx_ring(struct bge_softc *); |
240 | static int bge_init_tx_ring(struct bge_softc *); |
241 | |
242 | static int bge_chipinit(struct bge_softc *); |
243 | static int bge_blockinit(struct bge_softc *); |
244 | static int bge_phy_addr(struct bge_softc *); |
245 | static uint32_t bge_readmem_ind(struct bge_softc *, int); |
246 | static void bge_writemem_ind(struct bge_softc *, int, int); |
247 | static void bge_writembx(struct bge_softc *, int, int); |
248 | static void bge_writembx_flush(struct bge_softc *, int, int); |
249 | static void bge_writemem_direct(struct bge_softc *, int, int); |
250 | static void bge_writereg_ind(struct bge_softc *, int, int); |
251 | static void bge_set_max_readrq(struct bge_softc *); |
252 | |
253 | static int bge_miibus_readreg(device_t, int, int); |
254 | static void bge_miibus_writereg(device_t, int, int, int); |
255 | static void bge_miibus_statchg(struct ifnet *); |
256 | |
257 | #define BGE_RESET_SHUTDOWN 0 |
258 | #define BGE_RESET_START 1 |
259 | #define BGE_RESET_SUSPEND 2 |
260 | static void bge_sig_post_reset(struct bge_softc *, int); |
261 | static void bge_sig_legacy(struct bge_softc *, int); |
262 | static void bge_sig_pre_reset(struct bge_softc *, int); |
263 | static void bge_wait_for_event_ack(struct bge_softc *); |
264 | static void bge_stop_fw(struct bge_softc *); |
265 | static int bge_reset(struct bge_softc *); |
266 | static void bge_link_upd(struct bge_softc *); |
267 | static void bge_sysctl_init(struct bge_softc *); |
268 | static int bge_sysctl_verify(SYSCTLFN_PROTO); |
269 | |
270 | static void bge_ape_lock_init(struct bge_softc *); |
271 | static void bge_ape_read_fw_ver(struct bge_softc *); |
272 | static int bge_ape_lock(struct bge_softc *, int); |
273 | static void bge_ape_unlock(struct bge_softc *, int); |
274 | static void bge_ape_send_event(struct bge_softc *, uint32_t); |
275 | static void bge_ape_driver_state_change(struct bge_softc *, int); |
276 | |
277 | #ifdef BGE_DEBUG |
278 | #define DPRINTF(x) if (bgedebug) printf x |
279 | #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x |
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x; } while (0)
281 | int bgedebug = 0; |
282 | int bge_tso_debug = 0; |
283 | void bge_debug_info(struct bge_softc *); |
284 | #else |
285 | #define DPRINTF(x) |
286 | #define DPRINTFN(n,x) |
287 | #define BGE_TSO_PRINTF(x) |
288 | #endif |
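
/*
 * Usage note: these debug macros take their argument list wrapped in an
 * extra set of parentheses so that it passes through as a single macro
 * argument, e.g. (illustrative only):
 *
 *	DPRINTFN(2, ("%s: tx ring full\n", device_xname(sc->bge_dev)));
 */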
289 | |
290 | #ifdef BGE_EVENT_COUNTERS |
291 | #define BGE_EVCNT_INCR(ev) (ev).ev_count++ |
292 | #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) |
293 | #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) |
294 | #else |
295 | #define BGE_EVCNT_INCR(ev) /* nothing */ |
296 | #define BGE_EVCNT_ADD(ev, val) /* nothing */ |
297 | #define BGE_EVCNT_UPD(ev, val) /* nothing */ |
298 | #endif |
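
/*
 * Usage note (illustrative): with BGE_EVENT_COUNTERS defined,
 * BGE_EVCNT_INCR(sc->bge_ev_intr) bumps the corresponding event
 * counter; without it, these macros compile away to nothing, so they
 * can sit in hot paths at no cost in production kernels.
 */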
299 | |
300 | static const struct bge_product { |
301 | pci_vendor_id_t bp_vendor; |
302 | pci_product_id_t bp_product; |
303 | const char *bp_name; |
304 | } bge_products[] = { |
305 | /* |
306 | * The BCM5700 documentation seems to indicate that the hardware |
307 | * still has the Alteon vendor ID burned into it, though it |
308 | * should always be overridden by the value in the EEPROM. We'll |
309 | * check for it anyway. |
310 | */ |
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1003,
	  "Altima AC1003 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet",
	},
	{ PCI_VENDOR_APPLE,
	  PCI_PRODUCT_APPLE_BCM5701,
	  "APPLE BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5719,
	  "Broadcom BCM5719 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5785F,
	  "BCM5785F NetLink 10/100 Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5785G,
	  "BCM5785G NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787F,
	  "Broadcom BCM5787F 10/100 Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906M,
	  "Broadcom BCM5906M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57760,
	  "Broadcom BCM57760 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57761,
	  "Broadcom BCM57761 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57762,
	  "Broadcom BCM57762 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57765,
	  "Broadcom BCM57765 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57766,
	  "Broadcom BCM57766 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57780,
	  "Broadcom BCM57780 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57781,
	  "Broadcom BCM57781 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57782,
	  "Broadcom BCM57782 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57785,
	  "Broadcom BCM57785 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57786,
	  "Broadcom BCM57786 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57788,
	  "Broadcom BCM57788 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57790,
	  "Broadcom BCM57790 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57791,
	  "Broadcom BCM57791 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57795,
	  "Broadcom BCM57795 Fast Ethernet",
	},
	{ PCI_VENDOR_SCHNEIDERKOCH,
	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
	  "SysKonnect SK-9Dx1 Gigabit Ethernet",
	},
	{ PCI_VENDOR_3COM,
	  PCI_PRODUCT_3COM_3C996,
	  "3Com 3c996 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE4,
	  "Fujitsu PW008GE4 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE5,
	  "Fujitsu PW008GE5 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
	  "Fujitsu Primepower 250/450 Gigabit Ethernet",
	},
	{ 0, 0, NULL },
665 | }; |
666 | |
667 | #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGEF_JUMBO_CAPABLE) |
668 | #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGEF_5700_FAMILY) |
669 | #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGEF_5705_PLUS) |
670 | #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGEF_5714_FAMILY) |
671 | #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGEF_575X_PLUS) |
672 | #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGEF_5755_PLUS) |
673 | #define BGE_IS_57765_FAMILY(sc) ((sc)->bge_flags & BGEF_57765_FAMILY) |
674 | #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGEF_57765_PLUS) |
675 | #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGEF_5717_PLUS) |
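
/*
 * These predicates test feature bits derived once from the ASIC
 * revision at attach time, keeping chip-family checks readable
 * elsewhere in the driver, e.g.:
 *
 *	if (BGE_IS_5755_PLUS(sc))
 *		... take the newer-chip code path ...
 */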
676 | |
677 | static const struct bge_revision { |
678 | uint32_t br_chipid; |
679 | const char *br_name; |
680 | } bge_revisions[] = { |
681 | { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, |
682 | { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, |
683 | { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, |
684 | { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, |
685 | { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, |
686 | { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, |
687 | { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, |
688 | { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, |
689 | { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, |
690 | { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, |
691 | { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, |
692 | { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, |
693 | { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, |
694 | { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, |
695 | { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, |
696 | { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, |
697 | { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, |
698 | { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, |
699 | { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, |
700 | { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, |
701 | { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, |
702 | { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, |
703 | { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, |
704 | { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, |
705 | { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, |
706 | { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, |
707 | { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, |
708 | { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, |
709 | { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, |
710 | { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, |
711 | { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, |
712 | { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, |
713 | { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, |
714 | { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, |
715 | { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, |
716 | { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, |
717 | { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, |
718 | { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, |
719 | { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, |
720 | { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, |
721 | { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, |
722 | { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, |
723 | { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, |
724 | { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, |
725 | { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, |
726 | { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, |
727 | { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, |
728 | { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, |
729 | { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, |
730 | { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, |
731 | { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, |
732 | { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, |
733 | { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, |
734 | { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, |
735 | { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, |
736 | { BGE_CHIPID_BCM5784_B0, "BCM5784 B0" }, |
737 | /* 5754 and 5787 share the same ASIC ID */ |
738 | { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, |
739 | { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, |
740 | { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, |
741 | { BGE_CHIPID_BCM5906_A0, "BCM5906 A0" }, |
742 | { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, |
743 | { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, |
744 | { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, |
745 | { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, |
746 | { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, |
747 | { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, |
748 | |
749 | { 0, NULL } |
750 | }; |
751 | |
752 | /* |
753 | * Some defaults for major revisions, so that newer steppings |
754 | * that we don't know about have a shot at working. |
755 | */ |
756 | static const struct bge_revision bge_majorrevs[] = { |
757 | { BGE_ASICREV_BCM5700, "unknown BCM5700" }, |
758 | { BGE_ASICREV_BCM5701, "unknown BCM5701" }, |
759 | { BGE_ASICREV_BCM5703, "unknown BCM5703" }, |
760 | { BGE_ASICREV_BCM5704, "unknown BCM5704" }, |
761 | { BGE_ASICREV_BCM5705, "unknown BCM5705" }, |
762 | { BGE_ASICREV_BCM5750, "unknown BCM5750" }, |
763 | { BGE_ASICREV_BCM5714, "unknown BCM5714" }, |
764 | { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, |
765 | { BGE_ASICREV_BCM5752, "unknown BCM5752" }, |
766 | { BGE_ASICREV_BCM5780, "unknown BCM5780" }, |
767 | { BGE_ASICREV_BCM5755, "unknown BCM5755" }, |
768 | { BGE_ASICREV_BCM5761, "unknown BCM5761" }, |
769 | { BGE_ASICREV_BCM5784, "unknown BCM5784" }, |
770 | { BGE_ASICREV_BCM5785, "unknown BCM5785" }, |
771 | /* 5754 and 5787 share the same ASIC ID */ |
772 | { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, |
773 | { BGE_ASICREV_BCM5906, "unknown BCM5906" }, |
774 | { BGE_ASICREV_BCM57765, "unknown BCM57765" }, |
775 | { BGE_ASICREV_BCM57766, "unknown BCM57766" }, |
776 | { BGE_ASICREV_BCM57780, "unknown BCM57780" }, |
777 | { BGE_ASICREV_BCM5717, "unknown BCM5717" }, |
778 | { BGE_ASICREV_BCM5719, "unknown BCM5719" }, |
779 | { BGE_ASICREV_BCM5720, "unknown BCM5720" }, |
780 | |
781 | { 0, NULL } |
782 | }; |
783 | |
784 | static int bge_allow_asf = 1; |
785 | |
786 | CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc), |
787 | bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); |
788 | |
789 | static uint32_t |
790 | bge_readmem_ind(struct bge_softc *sc, int off) |
791 | { |
792 | pcireg_t val; |
793 | |
794 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && |
795 | off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) |
796 | return 0; |
797 | |
798 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); |
799 | val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA); |
800 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); |
801 | return val; |
802 | } |
803 | |
804 | static void |
805 | bge_writemem_ind(struct bge_softc *sc, int off, int val) |
806 | { |
807 | |
808 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); |
809 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val); |
810 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); |
811 | } |
812 | |
813 | /* |
814 | * PCI Express only |
815 | */ |
816 | static void |
817 | bge_set_max_readrq(struct bge_softc *sc) |
818 | { |
819 | pcireg_t val; |
820 | |
821 | val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap |
822 | + PCIE_DCSR); |
823 | val &= ~PCIE_DCSR_MAX_READ_REQ; |
824 | switch (sc->bge_expmrq) { |
825 | case 2048: |
826 | val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048; |
827 | break; |
828 | case 4096: |
829 | val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; |
830 | break; |
831 | default: |
832 | panic("incorrect expmrq value(%d)" , sc->bge_expmrq); |
833 | break; |
834 | } |
835 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap |
836 | + PCIE_DCSR, val); |
837 | } |
838 | |
839 | #ifdef notdef |
840 | static uint32_t |
841 | bge_readreg_ind(struct bge_softc *sc, int off) |
842 | { |
843 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); |
844 | return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA)); |
845 | } |
846 | #endif |
847 | |
848 | static void |
849 | bge_writereg_ind(struct bge_softc *sc, int off, int val) |
850 | { |
851 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); |
852 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val); |
853 | } |
854 | |
855 | static void |
856 | bge_writemem_direct(struct bge_softc *sc, int off, int val) |
857 | { |
858 | CSR_WRITE_4(sc, off, val); |
859 | } |
860 | |
861 | static void |
862 | bge_writembx(struct bge_softc *sc, int off, int val) |
863 | { |
864 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
865 | off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; |
866 | |
867 | CSR_WRITE_4(sc, off, val); |
868 | } |
869 | |
870 | static void |
871 | bge_writembx_flush(struct bge_softc *sc, int off, int val) |
872 | { |
873 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
874 | off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; |
875 | |
876 | CSR_WRITE_4_FLUSH(sc, off, val); |
877 | } |
878 | |
879 | /* |
880 | * Clear all stale locks and select the lock for this driver instance. |
881 | */ |
882 | void |
883 | bge_ape_lock_init(struct bge_softc *sc) |
884 | { |
885 | struct pci_attach_args *pa = &(sc->bge_pa); |
886 | uint32_t bit, regbase; |
887 | int i; |
888 | |
889 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
890 | regbase = BGE_APE_LOCK_GRANT; |
891 | else |
892 | regbase = BGE_APE_PER_LOCK_GRANT; |
893 | |
894 | /* Clear any stale locks. */ |
895 | for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { |
896 | switch (i) { |
897 | case BGE_APE_LOCK_PHY0: |
898 | case BGE_APE_LOCK_PHY1: |
899 | case BGE_APE_LOCK_PHY2: |
900 | case BGE_APE_LOCK_PHY3: |
901 | bit = BGE_APE_LOCK_GRANT_DRIVER0; |
902 | break; |
903 | default: |
904 | if (pa->pa_function == 0) |
905 | bit = BGE_APE_LOCK_GRANT_DRIVER0; |
906 | else |
907 | bit = (1 << pa->pa_function); |
908 | } |
909 | APE_WRITE_4(sc, regbase + 4 * i, bit); |
910 | } |
911 | |
912 | /* Select the PHY lock based on the device's function number. */ |
913 | switch (pa->pa_function) { |
914 | case 0: |
915 | sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; |
916 | break; |
917 | case 1: |
918 | sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; |
919 | break; |
920 | case 2: |
921 | sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; |
922 | break; |
923 | case 3: |
924 | sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; |
925 | break; |
926 | default: |
927 | printf("%s: PHY lock not supported on function\n" , |
928 | device_xname(sc->bge_dev)); |
929 | break; |
930 | } |
931 | } |
932 | |
933 | /* |
934 | * Check for APE firmware, set flags, and print version info. |
935 | */ |
936 | void |
937 | bge_ape_read_fw_ver(struct bge_softc *sc) |
938 | { |
939 | const char *fwtype; |
940 | uint32_t apedata, features; |
941 | |
942 | /* Check for a valid APE signature in shared memory. */ |
943 | apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); |
944 | if (apedata != BGE_APE_SEG_SIG_MAGIC) { |
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
946 | return; |
947 | } |
948 | |
949 | /* Check if APE firmware is running. */ |
950 | apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); |
951 | if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { |
952 | printf("%s: APE signature found but FW status not ready! " |
953 | "0x%08x\n" , device_xname(sc->bge_dev), apedata); |
954 | return; |
955 | } |
956 | |
957 | sc->bge_mfw_flags |= BGE_MFW_ON_APE; |
958 | |
	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
977 | } |
978 | |
979 | int |
980 | bge_ape_lock(struct bge_softc *sc, int locknum) |
981 | { |
982 | struct pci_attach_args *pa = &(sc->bge_pa); |
983 | uint32_t bit, gnt, req, status; |
984 | int i, off; |
985 | |
986 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
987 | return (0); |
988 | |
989 | /* Lock request/grant registers have different bases. */ |
990 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) { |
991 | req = BGE_APE_LOCK_REQ; |
992 | gnt = BGE_APE_LOCK_GRANT; |
993 | } else { |
994 | req = BGE_APE_PER_LOCK_REQ; |
995 | gnt = BGE_APE_PER_LOCK_GRANT; |
996 | } |
997 | |
998 | off = 4 * locknum; |
999 | |
1000 | switch (locknum) { |
1001 | case BGE_APE_LOCK_GPIO: |
1002 | /* Lock required when using GPIO. */ |
1003 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
1004 | return (0); |
1005 | if (pa->pa_function == 0) |
1006 | bit = BGE_APE_LOCK_REQ_DRIVER0; |
1007 | else |
1008 | bit = (1 << pa->pa_function); |
1009 | break; |
1010 | case BGE_APE_LOCK_GRC: |
1011 | /* Lock required to reset the device. */ |
1012 | if (pa->pa_function == 0) |
1013 | bit = BGE_APE_LOCK_REQ_DRIVER0; |
1014 | else |
1015 | bit = (1 << pa->pa_function); |
1016 | break; |
1017 | case BGE_APE_LOCK_MEM: |
1018 | /* Lock required when accessing certain APE memory. */ |
1019 | if (pa->pa_function == 0) |
1020 | bit = BGE_APE_LOCK_REQ_DRIVER0; |
1021 | else |
1022 | bit = (1 << pa->pa_function); |
1023 | break; |
1024 | case BGE_APE_LOCK_PHY0: |
1025 | case BGE_APE_LOCK_PHY1: |
1026 | case BGE_APE_LOCK_PHY2: |
1027 | case BGE_APE_LOCK_PHY3: |
1028 | /* Lock required when accessing PHYs. */ |
1029 | bit = BGE_APE_LOCK_REQ_DRIVER0; |
1030 | break; |
1031 | default: |
1032 | return (EINVAL); |
1033 | } |
1034 | |
1035 | /* Request a lock. */ |
1036 | APE_WRITE_4_FLUSH(sc, req + off, bit); |
1037 | |
1038 | /* Wait up to 1 second to acquire lock. */ |
1039 | for (i = 0; i < 20000; i++) { |
1040 | status = APE_READ_4(sc, gnt + off); |
1041 | if (status == bit) |
1042 | break; |
1043 | DELAY(50); |
1044 | } |
1045 | |
1046 | /* Handle any errors. */ |
1047 | if (status != bit) { |
1048 | printf("%s: APE lock %d request failed! " |
1049 | "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n" , |
1050 | device_xname(sc->bge_dev), |
1051 | locknum, req + off, bit & 0xFFFF, gnt + off, |
1052 | status & 0xFFFF); |
1053 | /* Revoke the lock request. */ |
1054 | APE_WRITE_4(sc, gnt + off, bit); |
1055 | return (EBUSY); |
1056 | } |
1057 | |
1058 | return (0); |
1059 | } |
1060 | |
1061 | void |
1062 | bge_ape_unlock(struct bge_softc *sc, int locknum) |
1063 | { |
1064 | struct pci_attach_args *pa = &(sc->bge_pa); |
1065 | uint32_t bit, gnt; |
1066 | int off; |
1067 | |
1068 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
1069 | return; |
1070 | |
1071 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
1072 | gnt = BGE_APE_LOCK_GRANT; |
1073 | else |
1074 | gnt = BGE_APE_PER_LOCK_GRANT; |
1075 | |
1076 | off = 4 * locknum; |
1077 | |
1078 | switch (locknum) { |
1079 | case BGE_APE_LOCK_GPIO: |
1080 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
1081 | return; |
1082 | if (pa->pa_function == 0) |
1083 | bit = BGE_APE_LOCK_GRANT_DRIVER0; |
1084 | else |
1085 | bit = (1 << pa->pa_function); |
1086 | break; |
1087 | case BGE_APE_LOCK_GRC: |
1088 | if (pa->pa_function == 0) |
1089 | bit = BGE_APE_LOCK_GRANT_DRIVER0; |
1090 | else |
1091 | bit = (1 << pa->pa_function); |
1092 | break; |
1093 | case BGE_APE_LOCK_MEM: |
1094 | if (pa->pa_function == 0) |
1095 | bit = BGE_APE_LOCK_GRANT_DRIVER0; |
1096 | else |
1097 | bit = (1 << pa->pa_function); |
1098 | break; |
1099 | case BGE_APE_LOCK_PHY0: |
1100 | case BGE_APE_LOCK_PHY1: |
1101 | case BGE_APE_LOCK_PHY2: |
1102 | case BGE_APE_LOCK_PHY3: |
1103 | bit = BGE_APE_LOCK_GRANT_DRIVER0; |
1104 | break; |
1105 | default: |
1106 | return; |
1107 | } |
1108 | |
1109 | /* Write and flush for consecutive bge_ape_lock() */ |
1110 | APE_WRITE_4_FLUSH(sc, gnt + off, bit); |
1111 | } |
1112 | |
1113 | /* |
1114 | * Send an event to the APE firmware. |
1115 | */ |
1116 | void |
1117 | bge_ape_send_event(struct bge_softc *sc, uint32_t event) |
1118 | { |
1119 | uint32_t apedata; |
1120 | int i; |
1121 | |
1122 | /* NCSI does not support APE events. */ |
1123 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
1124 | return; |
1125 | |
1126 | /* Wait up to 1ms for APE to service previous event. */ |
1127 | for (i = 10; i > 0; i--) { |
1128 | if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) |
1129 | break; |
1130 | apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); |
1131 | if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { |
1132 | APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | |
1133 | BGE_APE_EVENT_STATUS_EVENT_PENDING); |
1134 | bge_ape_unlock(sc, BGE_APE_LOCK_MEM); |
1135 | APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); |
1136 | break; |
1137 | } |
1138 | bge_ape_unlock(sc, BGE_APE_LOCK_MEM); |
1139 | DELAY(100); |
1140 | } |
1141 | if (i == 0) { |
1142 | printf("%s: APE event 0x%08x send timed out\n" , |
1143 | device_xname(sc->bge_dev), event); |
1144 | } |
1145 | } |
1146 | |
1147 | void |
1148 | bge_ape_driver_state_change(struct bge_softc *sc, int kind) |
1149 | { |
1150 | uint32_t apedata, event; |
1151 | |
1152 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
1153 | return; |
1154 | |
1155 | switch (kind) { |
1156 | case BGE_RESET_START: |
1157 | /* If this is the first load, clear the load counter. */ |
1158 | apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); |
1159 | if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) |
1160 | APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); |
1161 | else { |
1162 | apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); |
1163 | APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); |
1164 | } |
1165 | APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, |
1166 | BGE_APE_HOST_SEG_SIG_MAGIC); |
1167 | APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, |
1168 | BGE_APE_HOST_SEG_LEN_MAGIC); |
1169 | |
1170 | /* Add some version info if bge(4) supports it. */ |
1171 | APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, |
1172 | BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); |
1173 | APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, |
1174 | BGE_APE_HOST_BEHAV_NO_PHYLOCK); |
1175 | APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, |
1176 | BGE_APE_HOST_HEARTBEAT_INT_DISABLE); |
1177 | APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, |
1178 | BGE_APE_HOST_DRVR_STATE_START); |
1179 | event = BGE_APE_EVENT_STATUS_STATE_START; |
1180 | break; |
1181 | case BGE_RESET_SHUTDOWN: |
1182 | APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, |
1183 | BGE_APE_HOST_DRVR_STATE_UNLOAD); |
1184 | event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; |
1185 | break; |
1186 | case BGE_RESET_SUSPEND: |
1187 | event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; |
1188 | break; |
1189 | default: |
1190 | return; |
1191 | } |
1192 | |
1193 | bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | |
1194 | BGE_APE_EVENT_STATUS_STATE_CHNGE); |
1195 | } |
1196 | |
1197 | static uint8_t |
1198 | bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) |
1199 | { |
1200 | uint32_t access, byte = 0; |
1201 | int i; |
1202 | |
1203 | /* Lock. */ |
1204 | CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); |
1205 | for (i = 0; i < 8000; i++) { |
1206 | if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) |
1207 | break; |
1208 | DELAY(20); |
1209 | } |
1210 | if (i == 8000) |
1211 | return 1; |
1212 | |
1213 | /* Enable access. */ |
1214 | access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); |
1215 | CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); |
1216 | |
1217 | CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); |
1218 | CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); |
1219 | for (i = 0; i < BGE_TIMEOUT * 10; i++) { |
1220 | DELAY(10); |
1221 | if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { |
1222 | DELAY(10); |
1223 | break; |
1224 | } |
1225 | } |
1226 | |
1227 | if (i == BGE_TIMEOUT * 10) { |
1228 | aprint_error_dev(sc->bge_dev, "nvram read timed out\n" ); |
1229 | return 1; |
1230 | } |
1231 | |
1232 | /* Get result. */ |
1233 | byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); |
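	/*
	 * The NVRAM engine returns a big-endian 32-bit word containing
	 * the addressed byte; byte-swap it and shift the byte at offset
	 * (addr % 4) down into the low 8 bits.
	 */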
1234 | |
1235 | *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; |
1236 | |
1237 | /* Disable access. */ |
1238 | CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); |
1239 | |
1240 | /* Unlock. */ |
1241 | CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); |
1242 | |
1243 | return 0; |
1244 | } |
1245 | |
1246 | /* |
1247 | * Read a sequence of bytes from NVRAM. |
1248 | */ |
1249 | static int |
1250 | bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt) |
1251 | { |
1252 | int error = 0, i; |
1253 | uint8_t byte = 0; |
1254 | |
1255 | if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) |
1256 | return 1; |
1257 | |
1258 | for (i = 0; i < cnt; i++) { |
1259 | error = bge_nvram_getbyte(sc, off + i, &byte); |
1260 | if (error) |
1261 | break; |
1262 | *(dest + i) = byte; |
1263 | } |
1264 | |
1265 | return (error ? 1 : 0); |
1266 | } |
1267 | |
1268 | /* |
1269 | * Read a byte of data stored in the EEPROM at address 'addr.' The |
1270 | * BCM570x supports both the traditional bitbang interface and an |
1271 | * auto access interface for reading the EEPROM. We use the auto |
1272 | * access method. |
1273 | */ |
1274 | static uint8_t |
1275 | bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) |
1276 | { |
1277 | int i; |
1278 | uint32_t byte = 0; |
1279 | |
1280 | /* |
1281 | * Enable use of auto EEPROM access so we can avoid |
1282 | * having to use the bitbang method. |
1283 | */ |
1284 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); |
1285 | |
1286 | /* Reset the EEPROM, load the clock period. */ |
1287 | CSR_WRITE_4(sc, BGE_EE_ADDR, |
1288 | BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); |
1289 | DELAY(20); |
1290 | |
1291 | /* Issue the read EEPROM command. */ |
1292 | CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); |
1293 | |
1294 | /* Wait for completion */ |
1295 | for (i = 0; i < BGE_TIMEOUT * 10; i++) { |
1296 | DELAY(10); |
1297 | if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) |
1298 | break; |
1299 | } |
1300 | |
1301 | if (i == BGE_TIMEOUT * 10) { |
1302 | aprint_error_dev(sc->bge_dev, "eeprom read timed out\n" ); |
1303 | return 1; |
1304 | } |
1305 | |
1306 | /* Get result. */ |
1307 | byte = CSR_READ_4(sc, BGE_EE_DATA); |
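	/*
	 * BGE_EE_DATA holds a 32-bit word in which the addressed byte
	 * sits at bit offset (addr % 4) * 8; no byte swap is needed
	 * here, unlike the NVRAM path above.
	 */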
1308 | |
1309 | *dest = (byte >> ((addr % 4) * 8)) & 0xFF; |
1310 | |
1311 | return 0; |
1312 | } |
1313 | |
1314 | /* |
1315 | * Read a sequence of bytes from the EEPROM. |
1316 | */ |
1317 | static int |
1318 | bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) |
1319 | { |
1320 | int error = 0, i; |
1321 | uint8_t byte = 0; |
1322 | char *dest = destv; |
1323 | |
1324 | for (i = 0; i < cnt; i++) { |
1325 | error = bge_eeprom_getbyte(sc, off + i, &byte); |
1326 | if (error) |
1327 | break; |
1328 | *(dest + i) = byte; |
1329 | } |
1330 | |
1331 | return (error ? 1 : 0); |
1332 | } |
1333 | |
1334 | static int |
1335 | bge_miibus_readreg(device_t dev, int phy, int reg) |
1336 | { |
1337 | struct bge_softc *sc = device_private(dev); |
1338 | uint32_t val; |
1339 | uint32_t autopoll; |
1340 | int i; |
1341 | |
1342 | if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) |
1343 | return 0; |
1344 | |
1345 | /* Reading with autopolling on may trigger PCI errors */ |
1346 | autopoll = CSR_READ_4(sc, BGE_MI_MODE); |
1347 | if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1348 | BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); |
1349 | BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1350 | DELAY(80); |
1351 | } |
1352 | |
1353 | CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | |
1354 | BGE_MIPHY(phy) | BGE_MIREG(reg)); |
1355 | |
1356 | for (i = 0; i < BGE_TIMEOUT; i++) { |
1357 | delay(10); |
1358 | val = CSR_READ_4(sc, BGE_MI_COMM); |
1359 | if (!(val & BGE_MICOMM_BUSY)) { |
1360 | DELAY(5); |
1361 | val = CSR_READ_4(sc, BGE_MI_COMM); |
1362 | break; |
1363 | } |
1364 | } |
1365 | |
1366 | if (i == BGE_TIMEOUT) { |
1367 | aprint_error_dev(sc->bge_dev, "PHY read timed out\n" ); |
1368 | val = 0; |
1369 | goto done; |
1370 | } |
1371 | |
1372 | done: |
1373 | if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1374 | BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); |
1375 | BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1376 | DELAY(80); |
1377 | } |
1378 | |
1379 | bge_ape_unlock(sc, sc->bge_phy_ape_lock); |
1380 | |
1381 | if (val & BGE_MICOMM_READFAIL) |
1382 | return 0; |
1383 | |
1384 | return (val & 0xFFFF); |
1385 | } |
1386 | |
1387 | static void |
1388 | bge_miibus_writereg(device_t dev, int phy, int reg, int val) |
1389 | { |
1390 | struct bge_softc *sc = device_private(dev); |
1391 | uint32_t autopoll; |
1392 | int i; |
1393 | |
1394 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && |
1395 | (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) |
1396 | return; |
1397 | |
1398 | if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) |
1399 | return; |
1400 | |
	/* Writing with autopolling on may trigger PCI errors */
1402 | autopoll = CSR_READ_4(sc, BGE_MI_MODE); |
1403 | if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1404 | BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); |
1405 | BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1406 | DELAY(80); |
1407 | } |
1408 | |
1409 | CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | |
1410 | BGE_MIPHY(phy) | BGE_MIREG(reg) | val); |
1411 | |
1412 | for (i = 0; i < BGE_TIMEOUT; i++) { |
1413 | delay(10); |
1414 | if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { |
1415 | delay(5); |
1416 | CSR_READ_4(sc, BGE_MI_COMM); |
1417 | break; |
1418 | } |
1419 | } |
1420 | |
1421 | if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1422 | BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); |
1423 | BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1424 | delay(80); |
1425 | } |
1426 | |
1427 | bge_ape_unlock(sc, sc->bge_phy_ape_lock); |
1428 | |
	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
1431 | } |
1432 | |
1433 | static void |
1434 | bge_miibus_statchg(struct ifnet *ifp) |
1435 | { |
1436 | struct bge_softc *sc = ifp->if_softc; |
1437 | struct mii_data *mii = &sc->bge_mii; |
1438 | uint32_t mac_mode, rx_mode, tx_mode; |
1439 | |
1440 | /* |
1441 | * Get flow control negotiation result. |
1442 | */ |
1443 | if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && |
1444 | (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) |
1445 | sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; |
1446 | |
1447 | if (!BGE_STS_BIT(sc, BGE_STS_LINK) && |
1448 | mii->mii_media_status & IFM_ACTIVE && |
1449 | IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) |
1450 | BGE_STS_SETBIT(sc, BGE_STS_LINK); |
1451 | else if (BGE_STS_BIT(sc, BGE_STS_LINK) && |
1452 | (!(mii->mii_media_status & IFM_ACTIVE) || |
1453 | IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) |
1454 | BGE_STS_CLRBIT(sc, BGE_STS_LINK); |
1455 | |
1456 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)) |
1457 | return; |
1458 | |
1459 | /* Set the port mode (MII/GMII) to match the link speed. */ |
1460 | mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & |
1461 | ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); |
1462 | tx_mode = CSR_READ_4(sc, BGE_TX_MODE); |
1463 | rx_mode = CSR_READ_4(sc, BGE_RX_MODE); |
1464 | if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || |
1465 | IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) |
1466 | mac_mode |= BGE_PORTMODE_GMII; |
1467 | else |
1468 | mac_mode |= BGE_PORTMODE_MII; |
1469 | |
1470 | tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; |
1471 | rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; |
1472 | if ((mii->mii_media_active & IFM_FDX) != 0) { |
1473 | if (sc->bge_flowflags & IFM_ETH_TXPAUSE) |
1474 | tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; |
1475 | if (sc->bge_flowflags & IFM_ETH_RXPAUSE) |
1476 | rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; |
1477 | } else |
1478 | mac_mode |= BGE_MACMODE_HALF_DUPLEX; |
1479 | |
1480 | CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode); |
1481 | DELAY(40); |
1482 | CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); |
1483 | CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); |
1484 | } |
1485 | |
1486 | /* |
1487 | * Update rx threshold levels to values in a particular slot |
1488 | * of the interrupt-mitigation table bge_rx_threshes. |
1489 | */ |
1490 | static void |
1491 | bge_set_thresh(struct ifnet *ifp, int lvl) |
1492 | { |
1493 | struct bge_softc *sc = ifp->if_softc; |
1494 | int s; |
1495 | |
	/*
	 * For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
1502 | s = splnet(); |
1503 | sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; |
1504 | sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; |
1505 | sc->bge_pending_rxintr_change = 1; |
1506 | splx(s); |
1507 | } |
1508 | |
1509 | |
1510 | /* |
1511 | * Update Rx thresholds of all bge devices |
1512 | */ |
1513 | static void |
1514 | bge_update_all_threshes(int lvl) |
1515 | { |
1516 | struct ifnet *ifp; |
	const char * const namebuf = "bge";
1518 | int namelen; |
1519 | int s; |
1520 | |
1521 | if (lvl < 0) |
1522 | lvl = 0; |
1523 | else if (lvl >= NBGE_RX_THRESH) |
1524 | lvl = NBGE_RX_THRESH - 1; |
1525 | |
1526 | namelen = strlen(namebuf); |
1527 | /* |
1528 | * Now search all the interfaces for this name/number |
1529 | */ |
1530 | s = pserialize_read_enter(); |
1531 | IFNET_READER_FOREACH(ifp) { |
1532 | if (strncmp(ifp->if_xname, namebuf, namelen) != 0) |
1533 | continue; |
1534 | /* We got a match: update if doing auto-threshold-tuning */ |
1535 | if (bge_auto_thresh) |
1536 | bge_set_thresh(ifp, lvl); |
1537 | } |
1538 | pserialize_read_exit(s); |
1539 | } |
1540 | |
1541 | /* |
1542 | * Handle events that have triggered interrupts. |
1543 | */ |
1544 | static void |
1545 | bge_handle_events(struct bge_softc *sc) |
1546 | { |
1547 | |
1548 | return; |
1549 | } |
1550 | |
1551 | /* |
1552 | * Memory management for jumbo frames. |
1553 | */ |
1554 | |
1555 | static int |
1556 | bge_alloc_jumbo_mem(struct bge_softc *sc) |
1557 | { |
1558 | char *ptr, *kva; |
1559 | bus_dma_segment_t seg; |
1560 | int i, rseg, state, error; |
1561 | struct bge_jpool_entry *entry; |
1562 | |
1563 | state = error = 0; |
1564 | |
1565 | /* Grab a big chunk o' storage. */ |
1566 | if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, |
1567 | &seg, 1, &rseg, BUS_DMA_NOWAIT)) { |
1568 | aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n" ); |
1569 | return ENOBUFS; |
1570 | } |
1571 | |
1572 | state = 1; |
1573 | if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, |
1574 | BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
1577 | error = ENOBUFS; |
1578 | goto out; |
1579 | } |
1580 | |
1581 | state = 2; |
1582 | if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, |
1583 | BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { |
1584 | aprint_error_dev(sc->bge_dev, "can't create DMA map\n" ); |
1585 | error = ENOBUFS; |
1586 | goto out; |
1587 | } |
1588 | |
1589 | state = 3; |
1590 | if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, |
1591 | kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
1593 | error = ENOBUFS; |
1594 | goto out; |
1595 | } |
1596 | |
1597 | state = 4; |
1598 | sc->bge_cdata.bge_jumbo_buf = (void *)kva; |
	DPRINTFN(1, ("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));
1600 | |
1601 | SLIST_INIT(&sc->bge_jfree_listhead); |
1602 | SLIST_INIT(&sc->bge_jinuse_listhead); |
1603 | |
1604 | /* |
1605 | * Now divide it up into 9K pieces and save the addresses |
1606 | * in an array. |
1607 | */ |
1608 | ptr = sc->bge_cdata.bge_jumbo_buf; |
1609 | for (i = 0; i < BGE_JSLOTS; i++) { |
1610 | sc->bge_cdata.bge_jslots[i] = ptr; |
1611 | ptr += BGE_JLEN; |
1612 | entry = malloc(sizeof(struct bge_jpool_entry), |
1613 | M_DEVBUF, M_NOWAIT); |
1614 | if (entry == NULL) { |
1615 | aprint_error_dev(sc->bge_dev, |
			    "no memory for jumbo buffer queue!\n");
1617 | error = ENOBUFS; |
1618 | goto out; |
1619 | } |
1620 | entry->slot = i; |
1621 | SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, |
1622 | entry, jpool_entries); |
1623 | } |
1624 | out: |
1625 | if (error != 0) { |
1626 | switch (state) { |
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
1637 | break; |
1638 | default: |
1639 | break; |
1640 | } |
1641 | } |
1642 | |
1643 | return error; |
1644 | } |
1645 | |
1646 | /* |
1647 | * Allocate a jumbo buffer. |
1648 | */ |
1649 | static void * |
1650 | bge_jalloc(struct bge_softc *sc) |
1651 | { |
1652 | struct bge_jpool_entry *entry; |
1653 | |
1654 | entry = SLIST_FIRST(&sc->bge_jfree_listhead); |
1655 | |
1656 | if (entry == NULL) { |
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
1658 | return NULL; |
1659 | } |
1660 | |
1661 | SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); |
1662 | SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); |
1663 | return (sc->bge_cdata.bge_jslots[entry->slot]); |
1664 | } |
1665 | |
1666 | /* |
1667 | * Release a jumbo buffer. |
1668 | */ |
1669 | static void |
1670 | bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) |
1671 | { |
1672 | struct bge_jpool_entry *entry; |
1673 | struct bge_softc *sc; |
1674 | int i, s; |
1675 | |
1676 | /* Extract the softc struct pointer. */ |
1677 | sc = (struct bge_softc *)arg; |
1678 | |
1679 | if (sc == NULL) |
		panic("bge_jfree: can't find softc pointer!");
1681 | |
	/* Calculate the slot this buffer belongs to. */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
1686 | |
1687 | if ((i < 0) || (i >= BGE_JSLOTS)) |
		panic("bge_jfree: asked to free buffer that we don't manage!");
1689 | |
1690 | s = splvm(); |
1691 | entry = SLIST_FIRST(&sc->bge_jinuse_listhead); |
1692 | if (entry == NULL) |
		panic("bge_jfree: buffer not in use!");
1694 | entry->slot = i; |
1695 | SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); |
1696 | SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); |
1697 | |
1698 | if (__predict_true(m != NULL)) |
1699 | pool_cache_put(mb_cache, m); |
1700 | splx(s); |
1701 | } |
1702 | |
1703 | |
1704 | /* |
1705 | * Initialize a standard receive ring descriptor. |
1706 | */ |
1707 | static int |
1708 | bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, |
1709 | bus_dmamap_t dmamap) |
1710 | { |
1711 | struct mbuf *m_new = NULL; |
1712 | struct bge_rx_bd *r; |
1713 | int error; |
1714 | |
1715 | if (dmamap == NULL) { |
1716 | error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, |
1717 | MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); |
1718 | if (error != 0) |
1719 | return error; |
1720 | } |
1721 | |
1722 | sc->bge_cdata.bge_rx_std_map[i] = dmamap; |
1723 | |
1724 | if (m == NULL) { |
1725 | MGETHDR(m_new, M_DONTWAIT, MT_DATA); |
1726 | if (m_new == NULL) |
1727 | return ENOBUFS; |
1728 | |
1729 | MCLGET(m_new, M_DONTWAIT); |
1730 | if (!(m_new->m_flags & M_EXT)) { |
1731 | m_freem(m_new); |
1732 | return ENOBUFS; |
1733 | } |
1734 | m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; |
1735 | |
1736 | } else { |
1737 | m_new = m; |
1738 | m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; |
1739 | m_new->m_data = m_new->m_ext.ext_buf; |
1740 | } |
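	/*
	 * Shift the payload by ETHER_ALIGN (2 bytes) so the IP header
	 * following the 14-byte Ethernet header ends up 32-bit aligned;
	 * skipped on chips flagged with BGEF_RX_ALIGNBUG, whose RX DMA
	 * engine mishandles misaligned buffer addresses.
	 */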
1741 | if (!(sc->bge_flags & BGEF_RX_ALIGNBUG)) |
1742 | m_adj(m_new, ETHER_ALIGN); |
1743 | if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, |
1744 | BUS_DMA_READ|BUS_DMA_NOWAIT)) { |
1745 | m_freem(m_new); |
1746 | return ENOBUFS; |
1747 | } |
1748 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, |
1749 | BUS_DMASYNC_PREREAD); |
1750 | |
1751 | sc->bge_cdata.bge_rx_std_chain[i] = m_new; |
1752 | r = &sc->bge_rdata->bge_rx_std_ring[i]; |
1753 | BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr); |
1754 | r->bge_flags = BGE_RXBDFLAG_END; |
1755 | r->bge_len = m_new->m_len; |
1756 | r->bge_idx = i; |
1757 | |
1758 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
1759 | offsetof(struct bge_ring_data, bge_rx_std_ring) + |
1760 | i * sizeof (struct bge_rx_bd), |
1761 | sizeof (struct bge_rx_bd), |
1762 | BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); |
1763 | |
1764 | return 0; |
1765 | } |
1766 | |
1767 | /* |
1768 | * Initialize a jumbo receive ring descriptor. This allocates |
1769 | * a jumbo buffer from the pool managed internally by the driver. |
1770 | */ |
1771 | static int |
1772 | bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) |
1773 | { |
1774 | struct mbuf *m_new = NULL; |
1775 | struct bge_rx_bd *r; |
1776 | void *buf = NULL; |
1777 | |
1778 | if (m == NULL) { |
1779 | |
1780 | /* Allocate the mbuf. */ |
1781 | MGETHDR(m_new, M_DONTWAIT, MT_DATA); |
1782 | if (m_new == NULL) |
1783 | return ENOBUFS; |
1784 | |
1785 | /* Allocate the jumbo buffer */ |
1786 | buf = bge_jalloc(sc); |
1787 | if (buf == NULL) { |
1788 | m_freem(m_new); |
1789 | aprint_error_dev(sc->bge_dev, |
			    "jumbo allocation failed -- packet dropped!\n");
1791 | return ENOBUFS; |
1792 | } |
1793 | |
1794 | /* Attach the buffer to the mbuf. */ |
1795 | m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; |
1796 | MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, |
1797 | bge_jfree, sc); |
1798 | m_new->m_flags |= M_EXT_RW; |
1799 | } else { |
1800 | m_new = m; |
1801 | buf = m_new->m_data = m_new->m_ext.ext_buf; |
1802 | m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; |
1803 | } |
1804 | if (!(sc->bge_flags & BGEF_RX_ALIGNBUG)) |
1805 | m_adj(m_new, ETHER_ALIGN); |
1806 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, |
1807 | mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN, |
1808 | BUS_DMASYNC_PREREAD); |
1809 | /* Set up the descriptor. */ |
1810 | r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; |
1811 | sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; |
1812 | BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); |
1813 | r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; |
1814 | r->bge_len = m_new->m_len; |
1815 | r->bge_idx = i; |
1816 | |
1817 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
1818 | offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + |
1819 | i * sizeof (struct bge_rx_bd), |
1820 | sizeof (struct bge_rx_bd), |
1821 | BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); |
1822 | |
1823 | return 0; |
1824 | } |
1825 | |
1826 | /* |
1827 | * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, |
 * that's 1MB of memory, which is a lot. For now, we fill only the first
1829 | * 256 ring entries and hope that our CPU is fast enough to keep up with |
1830 | * the NIC. |
1831 | */ |
1832 | static int |
1833 | bge_init_rx_ring_std(struct bge_softc *sc) |
1834 | { |
1835 | int i; |
1836 | |
1837 | if (sc->bge_flags & BGEF_RXRING_VALID) |
1838 | return 0; |
1839 | |
1840 | for (i = 0; i < BGE_SSLOTS; i++) { |
1841 | if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) |
1842 | return ENOBUFS; |
1843 | } |
1844 | |
1845 | sc->bge_std = i - 1; |
1846 | bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); |
1847 | |
1848 | sc->bge_flags |= BGEF_RXRING_VALID; |
1849 | |
1850 | return 0; |
1851 | } |
1852 | |
1853 | static void |
1854 | bge_free_rx_ring_std(struct bge_softc *sc) |
1855 | { |
1856 | int i; |
1857 | |
1858 | if (!(sc->bge_flags & BGEF_RXRING_VALID)) |
1859 | return; |
1860 | |
1861 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { |
1862 | if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { |
1863 | m_freem(sc->bge_cdata.bge_rx_std_chain[i]); |
1864 | sc->bge_cdata.bge_rx_std_chain[i] = NULL; |
1865 | bus_dmamap_destroy(sc->bge_dmatag, |
1866 | sc->bge_cdata.bge_rx_std_map[i]); |
1867 | } |
1868 | memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, |
1869 | sizeof(struct bge_rx_bd)); |
1870 | } |
1871 | |
1872 | sc->bge_flags &= ~BGEF_RXRING_VALID; |
1873 | } |
1874 | |
1875 | static int |
1876 | bge_init_rx_ring_jumbo(struct bge_softc *sc) |
1877 | { |
1878 | int i; |
1879 | volatile struct bge_rcb *rcb; |
1880 | |
1881 | if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID) |
1882 | return 0; |
1883 | |
1884 | for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { |
1885 | if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) |
1886 | return ENOBUFS; |
1887 | } |
1888 | |
1889 | sc->bge_jumbo = i - 1; |
1890 | sc->bge_flags |= BGEF_JUMBO_RXRING_VALID; |
1891 | |
1892 | rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; |
1893 | rcb->bge_maxlen_flags = 0; |
1894 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); |
1895 | |
1896 | bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); |
1897 | |
1898 | return 0; |
1899 | } |
1900 | |
1901 | static void |
1902 | bge_free_rx_ring_jumbo(struct bge_softc *sc) |
1903 | { |
1904 | int i; |
1905 | |
1906 | if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID)) |
1907 | return; |
1908 | |
1909 | for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { |
1910 | if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { |
1911 | m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); |
1912 | sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; |
1913 | } |
1914 | memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, |
1915 | sizeof(struct bge_rx_bd)); |
1916 | } |
1917 | |
1918 | sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID; |
1919 | } |
1920 | |
1921 | static void |
1922 | bge_free_tx_ring(struct bge_softc *sc) |
1923 | { |
1924 | int i; |
1925 | struct txdmamap_pool_entry *dma; |
1926 | |
1927 | if (!(sc->bge_flags & BGEF_TXRING_VALID)) |
1928 | return; |
1929 | |
1930 | for (i = 0; i < BGE_TX_RING_CNT; i++) { |
1931 | if (sc->bge_cdata.bge_tx_chain[i] != NULL) { |
1932 | m_freem(sc->bge_cdata.bge_tx_chain[i]); |
1933 | sc->bge_cdata.bge_tx_chain[i] = NULL; |
1934 | SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], |
1935 | link); |
1936 | sc->txdma[i] = 0; |
1937 | } |
1938 | memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, |
1939 | sizeof(struct bge_tx_bd)); |
1940 | } |
1941 | |
1942 | while ((dma = SLIST_FIRST(&sc->txdma_list))) { |
1943 | SLIST_REMOVE_HEAD(&sc->txdma_list, link); |
1944 | bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); |
1945 | free(dma, M_DEVBUF); |
1946 | } |
1947 | |
1948 | sc->bge_flags &= ~BGEF_TXRING_VALID; |
1949 | } |
1950 | |
1951 | static int |
1952 | bge_init_tx_ring(struct bge_softc *sc) |
1953 | { |
1954 | struct ifnet *ifp = &sc->ethercom.ec_if; |
1955 | int i; |
1956 | bus_dmamap_t dmamap; |
1957 | bus_size_t maxsegsz; |
1958 | struct txdmamap_pool_entry *dma; |
1959 | |
1960 | if (sc->bge_flags & BGEF_TXRING_VALID) |
1961 | return 0; |
1962 | |
1963 | sc->bge_txcnt = 0; |
1964 | sc->bge_tx_saved_considx = 0; |
1965 | |
1966 | /* Initialize transmit producer index for host-memory send ring. */ |
1967 | sc->bge_tx_prodidx = 0; |
1968 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); |
1969 | /* 5700 b2 errata */ |
1970 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) |
1971 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); |
1972 | |
1973 | /* NIC-memory send ring not used; initialize to zero. */ |
1974 | bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); |
1975 | /* 5700 b2 errata */ |
1976 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) |
1977 | bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); |
1978 | |
1979 | /* Limit DMA segment size for some chips */ |
1980 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) && |
1981 | (ifp->if_mtu <= ETHERMTU)) |
1982 | maxsegsz = 2048; |
1983 | else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) |
1984 | maxsegsz = 4096; |
1985 | else |
1986 | maxsegsz = ETHER_MAX_LEN_JUMBO; |
1987 | SLIST_INIT(&sc->txdma_list); |
1988 | for (i = 0; i < BGE_TX_RING_CNT; i++) { |
1989 | if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, |
1990 | BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT, |
1991 | &dmamap)) |
1992 | return ENOBUFS; |
1993 | if (dmamap == NULL) |
			panic("dmamap NULL in bge_init_tx_ring");
1995 | dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); |
1996 | if (dma == NULL) { |
1997 | aprint_error_dev(sc->bge_dev, |
			    "can't alloc txdmamap_pool_entry\n");
1999 | bus_dmamap_destroy(sc->bge_dmatag, dmamap); |
2000 | return ENOMEM; |
2001 | } |
2002 | dma->dmamap = dmamap; |
2003 | SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); |
2004 | } |
2005 | |
2006 | sc->bge_flags |= BGEF_TXRING_VALID; |
2007 | |
2008 | return 0; |
2009 | } |
2010 | |
2011 | static void |
2012 | bge_setmulti(struct bge_softc *sc) |
2013 | { |
2014 | struct ethercom *ac = &sc->ethercom; |
2015 | struct ifnet *ifp = &ac->ec_if; |
2016 | struct ether_multi *enm; |
2017 | struct ether_multistep step; |
2018 | uint32_t hashes[4] = { 0, 0, 0, 0 }; |
2019 | uint32_t h; |
2020 | int i; |
2021 | |
2022 | if (ifp->if_flags & IFF_PROMISC) |
2023 | goto allmulti; |
2024 | |
2025 | /* Now program new ones. */ |
2026 | ETHER_FIRST_MULTI(step, ac, enm); |
2027 | while (enm != NULL) { |
2028 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
2029 | /* |
2030 | * We must listen to a range of multicast addresses. |
2031 | * For now, just accept all multicasts, rather than |
2032 | * trying to set only those filter bits needed to match |
2033 | * the range. (At this time, the only use of address |
2034 | * ranges is for IP multicast routing, for which the |
2035 | * range is big enough to require all bits set.) |
2036 | */ |
2037 | goto allmulti; |
2038 | } |
2039 | |
2040 | h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); |
2041 | |
2042 | /* Just want the 7 least-significant bits. */ |
2043 | h &= 0x7f; |
2044 | |
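		/*
		 * Worked example: h = 0x5a selects word
		 * (0x5a & 0x60) >> 5 = 2 and bit 0x5a & 0x1f = 26,
		 * i.e. hashes[2] |= 1 << 26.
		 */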
2045 | hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); |
2046 | ETHER_NEXT_MULTI(step, enm); |
2047 | } |
2048 | |
2049 | ifp->if_flags &= ~IFF_ALLMULTI; |
2050 | goto setit; |
2051 | |
2052 | allmulti: |
2053 | ifp->if_flags |= IFF_ALLMULTI; |
2054 | hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; |
2055 | |
2056 | setit: |
2057 | for (i = 0; i < 4; i++) |
2058 | CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); |
2059 | } |
2060 | |
2061 | static void |
2062 | bge_sig_pre_reset(struct bge_softc *sc, int type) |
2063 | { |
2064 | |
2065 | /* |
2066 | * Some chips don't like this so only do this if ASF is enabled |
2067 | */ |
2068 | if (sc->bge_asf_mode) |
2069 | bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); |
2070 | |
2071 | if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { |
2072 | switch (type) { |
2073 | case BGE_RESET_START: |
2074 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2075 | BGE_FW_DRV_STATE_START); |
2076 | break; |
2077 | case BGE_RESET_SHUTDOWN: |
2078 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2079 | BGE_FW_DRV_STATE_UNLOAD); |
2080 | break; |
2081 | case BGE_RESET_SUSPEND: |
2082 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2083 | BGE_FW_DRV_STATE_SUSPEND); |
2084 | break; |
2085 | } |
2086 | } |
2087 | |
2088 | if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) |
2089 | bge_ape_driver_state_change(sc, type); |
2090 | } |
2091 | |
2092 | static void |
2093 | bge_sig_post_reset(struct bge_softc *sc, int type) |
2094 | { |
2095 | |
2096 | if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { |
2097 | switch (type) { |
2098 | case BGE_RESET_START: |
2099 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2100 | BGE_FW_DRV_STATE_START_DONE); |
2101 | /* START DONE */ |
2102 | break; |
2103 | case BGE_RESET_SHUTDOWN: |
2104 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2105 | BGE_FW_DRV_STATE_UNLOAD_DONE); |
2106 | break; |
2107 | } |
2108 | } |
2109 | |
2110 | if (type == BGE_RESET_SHUTDOWN) |
2111 | bge_ape_driver_state_change(sc, type); |
2112 | } |
2113 | |
2114 | static void |
2115 | bge_sig_legacy(struct bge_softc *sc, int type) |
2116 | { |
2117 | |
2118 | if (sc->bge_asf_mode) { |
2119 | switch (type) { |
2120 | case BGE_RESET_START: |
2121 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2122 | BGE_FW_DRV_STATE_START); |
2123 | break; |
2124 | case BGE_RESET_SHUTDOWN: |
2125 | bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, |
2126 | BGE_FW_DRV_STATE_UNLOAD); |
2127 | break; |
2128 | } |
2129 | } |
2130 | } |
2131 | |
2132 | static void |
2133 | bge_wait_for_event_ack(struct bge_softc *sc) |
2134 | { |
2135 | int i; |
2136 | |
2137 | /* wait up to 2500usec */ |
2138 | for (i = 0; i < 250; i++) { |
2139 | if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & |
2140 | BGE_RX_CPU_DRV_EVENT)) |
2141 | break; |
2142 | DELAY(10); |
2143 | } |
2144 | } |
2145 | |
2146 | static void |
2147 | bge_stop_fw(struct bge_softc *sc) |
2148 | { |
2149 | |
2150 | if (sc->bge_asf_mode) { |
2151 | bge_wait_for_event_ack(sc); |
2152 | |
2153 | bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); |
2154 | CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, |
2155 | CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); |
2156 | |
2157 | bge_wait_for_event_ack(sc); |
2158 | } |
2159 | } |
2160 | |
2161 | static int |
2162 | bge_poll_fw(struct bge_softc *sc) |
2163 | { |
2164 | uint32_t val; |
2165 | int i; |
2166 | |
2167 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
2168 | for (i = 0; i < BGE_TIMEOUT; i++) { |
2169 | val = CSR_READ_4(sc, BGE_VCPU_STATUS); |
2170 | if (val & BGE_VCPU_STATUS_INIT_DONE) |
2171 | break; |
2172 | DELAY(100); |
2173 | } |
2174 | if (i >= BGE_TIMEOUT) { |
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
2176 | return -1; |
2177 | } |
2178 | } else { |
2179 | /* |
2180 | * Poll the value location we just wrote until |
2181 | * we see the 1's complement of the magic number. |
2182 | * This indicates that the firmware initialization |
2183 | * is complete. |
2184 | * XXX 1000ms for Flash and 10000ms for SEEPROM. |
2185 | */ |
2186 | for (i = 0; i < BGE_TIMEOUT; i++) { |
2187 | val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); |
2188 | if (val == ~BGE_SRAM_FW_MB_MAGIC) |
2189 | break; |
2190 | DELAY(10); |
2191 | } |
2192 | |
2193 | if ((i >= BGE_TIMEOUT) |
2194 | && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) { |
2195 | aprint_error_dev(sc->bge_dev, |
			    "firmware handshake timed out, val = %x\n", val);
2197 | return -1; |
2198 | } |
2199 | } |
2200 | |
2201 | if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { |
2202 | /* tg3 says we have to wait extra time */ |
2203 | delay(10 * 1000); |
2204 | } |
2205 | |
2206 | return 0; |
2207 | } |
2208 | |
2209 | int |
2210 | bge_phy_addr(struct bge_softc *sc) |
2211 | { |
2212 | struct pci_attach_args *pa = &(sc->bge_pa); |
2213 | int phy_addr = 1; |
2214 | |
2215 | /* |
2216 | * PHY address mapping for various devices. |
2217 | * |
2218 | * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | |
2219 | * ---------+-------+-------+-------+-------+ |
2220 | * BCM57XX | 1 | X | X | X | |
2221 | * BCM5704 | 1 | X | 1 | X | |
2222 | * BCM5717 | 1 | 8 | 2 | 9 | |
2223 | * BCM5719 | 1 | 8 | 2 | 9 | |
2224 | * BCM5720 | 1 | 8 | 2 | 9 | |
2225 | * |
2226 | * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | |
2227 | * ---------+-------+-------+-------+-------+ |
2228 | * BCM57XX | X | X | X | X | |
2229 | * BCM5704 | X | X | X | X | |
2230 | * BCM5717 | X | X | X | X | |
2231 | * BCM5719 | 3 | 10 | 4 | 11 | |
2232 | * BCM5720 | X | X | X | X | |
2233 | * |
2234 | * Other addresses may respond but they are not |
2235 | * IEEE compliant PHYs and should be ignored. |
2236 | */ |
2237 | switch (BGE_ASICREV(sc->bge_chipid)) { |
2238 | case BGE_ASICREV_BCM5717: |
2239 | case BGE_ASICREV_BCM5719: |
2240 | case BGE_ASICREV_BCM5720: |
2241 | phy_addr = pa->pa_function; |
2242 | if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { |
2243 | phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) & |
2244 | BGE_SGDIGSTS_IS_SERDES) ? 8 : 1; |
2245 | } else { |
2246 | phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & |
2247 | BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1; |
2248 | } |
2249 | } |
2250 | |
2251 | return phy_addr; |
2252 | } |
2253 | |
2254 | /* |
2255 | * Do endian, PCI and DMA initialization. Also check the on-board ROM |
2256 | * self-test results. |
2257 | */ |
2258 | static int |
2259 | bge_chipinit(struct bge_softc *sc) |
2260 | { |
2261 | uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg; |
2262 | int i; |
2263 | |
2264 | /* Set endianness before we access any non-PCI registers. */ |
2265 | misc_ctl = BGE_INIT; |
2266 | if (sc->bge_flags & BGEF_TAGGED_STATUS) |
2267 | misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; |
2268 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, |
2269 | misc_ctl); |
2270 | |
2271 | /* |
2272 | * Clear the MAC statistics block in the NIC's |
2273 | * internal memory. |
2274 | */ |
2275 | for (i = BGE_STATS_BLOCK; |
2276 | i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) |
2277 | BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); |
2278 | |
2279 | for (i = BGE_STATUS_BLOCK; |
2280 | i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) |
2281 | BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); |
2282 | |
2283 | /* 5717 workaround from tg3 */ |
2284 | if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) { |
2285 | /* Save */ |
2286 | mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); |
2287 | |
		/* Temporarily modify MODE_CTL to control TLP */
2289 | reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; |
2290 | CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1); |
2291 | |
2292 | /* Control TLP */ |
2293 | reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + |
2294 | BGE_TLP_PHYCTL1); |
2295 | CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1, |
2296 | reg | BGE_TLP_PHYCTL1_EN_L1PLLPD); |
2297 | |
2298 | /* Restore */ |
2299 | CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); |
2300 | } |
2301 | |
2302 | if (BGE_IS_57765_FAMILY(sc)) { |
2303 | if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { |
2304 | /* Save */ |
2305 | mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); |
2306 | |
			/* Temporarily modify MODE_CTL to control TLP */
2308 | reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; |
2309 | CSR_WRITE_4(sc, BGE_MODE_CTL, |
2310 | reg | BGE_MODECTL_PCIE_TLPADDR1); |
2311 | |
2312 | /* Control TLP */ |
2313 | reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + |
2314 | BGE_TLP_PHYCTL5); |
2315 | CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5, |
2316 | reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ); |
2317 | |
2318 | /* Restore */ |
2319 | CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); |
2320 | } |
2321 | if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) { |
2322 | reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); |
2323 | CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, |
2324 | reg | BGE_CPMU_PADRNG_CTL_RDIV2); |
2325 | |
2326 | /* Save */ |
2327 | mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); |
2328 | |
			/* Temporarily modify MODE_CTL to control TLP */
2330 | reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; |
2331 | CSR_WRITE_4(sc, BGE_MODE_CTL, |
2332 | reg | BGE_MODECTL_PCIE_TLPADDR0); |
2333 | |
2334 | /* Control TLP */ |
2335 | reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + |
2336 | BGE_TLP_FTSMAX); |
2337 | reg &= ~BGE_TLP_FTSMAX_MSK; |
2338 | CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX, |
2339 | reg | BGE_TLP_FTSMAX_VAL); |
2340 | |
2341 | /* Restore */ |
2342 | CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); |
2343 | } |
2344 | |
2345 | reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); |
2346 | reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; |
2347 | reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; |
2348 | CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); |
2349 | } |
2350 | |
2351 | /* Set up the PCI DMA control register. */ |
2352 | dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD; |
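	/*
	 * The read/write watermark values below are small codes packed
	 * into dedicated bit fields of dma_rw_ctl; a sketch, assuming
	 * the shift macros in the register header are defined as:
	 *
	 *	#define BGE_PCIDMARWCTL_RD_WAT_SHIFT(x)	((x) << 16)
	 *	#define BGE_PCIDMARWCTL_WR_WAT_SHIFT(x)	((x) << 19)
	 */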
2353 | if (sc->bge_flags & BGEF_PCIE) { |
2354 | /* Read watermark not used, 128 bytes for write. */ |
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
2356 | device_xname(sc->bge_dev))); |
2357 | if (sc->bge_mps >= 256) |
2358 | dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); |
2359 | else |
2360 | dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); |
2361 | } else if (sc->bge_flags & BGEF_PCIX) { |
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
2363 | device_xname(sc->bge_dev))); |
2364 | /* PCI-X bus */ |
2365 | if (BGE_IS_5714_FAMILY(sc)) { |
2366 | /* 256 bytes for read and write. */ |
2367 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | |
2368 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); |
2369 | |
2370 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) |
2371 | dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; |
2372 | else |
2373 | dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; |
2374 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { |
2375 | /* |
2376 | * In the BCM5703, the DMA read watermark should |
2377 | * be set to less than or equal to the maximum |
2378 | * memory read byte count of the PCI-X command |
2379 | * register. |
2380 | */ |
2381 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | |
2382 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); |
2383 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { |
2384 | /* 1536 bytes for read, 384 bytes for write. */ |
2385 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | |
2386 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); |
2387 | } else { |
2388 | /* 384 bytes for read and write. */ |
2389 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | |
2390 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | |
2391 | (0x0F); |
2392 | } |
2393 | |
2394 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || |
2395 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { |
2396 | uint32_t tmp; |
2397 | |
2398 | /* Set ONEDMA_ATONCE for hardware workaround. */ |
2399 | tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; |
2400 | if (tmp == 6 || tmp == 7) |
2401 | dma_rw_ctl |= |
2402 | BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; |
2403 | |
2404 | /* Set PCI-X DMA write workaround. */ |
2405 | dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; |
2406 | } |
2407 | } else { |
2408 | /* Conventional PCI bus: 256 bytes for read and write. */ |
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
2410 | device_xname(sc->bge_dev))); |
2411 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | |
2412 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); |
2413 | |
2414 | if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 && |
2415 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750) |
2416 | dma_rw_ctl |= 0x0F; |
2417 | } |
2418 | |
2419 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || |
2420 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) |
2421 | dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | |
2422 | BGE_PCIDMARWCTL_ASRT_ALL_BE; |
2423 | |
2424 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || |
2425 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) |
2426 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; |
2427 | |
2428 | if (BGE_IS_57765_PLUS(sc)) { |
2429 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; |
2430 | if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) |
2431 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; |
2432 | |
2433 | /* |
2434 | * Enable HW workaround for controllers that misinterpret |
2435 | * a status tag update and leave interrupts permanently |
2436 | * disabled. |
2437 | */ |
2438 | if (!BGE_IS_57765_FAMILY(sc) && |
2439 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717) |
2440 | dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; |
2441 | } |
2442 | |
2443 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, |
2444 | dma_rw_ctl); |
2445 | |
2446 | /* |
2447 | * Set up general mode register. |
2448 | */ |
2449 | mode_ctl = BGE_DMA_SWAP_OPTIONS; |
2450 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { |
2451 | /* Retain Host-2-BMC settings written by APE firmware. */ |
2452 | mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & |
2453 | (BGE_MODECTL_BYTESWAP_B2HRX_DATA | |
2454 | BGE_MODECTL_WORDSWAP_B2HRX_DATA | |
2455 | BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); |
2456 | } |
2457 | mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | |
2458 | BGE_MODECTL_TX_NO_PHDR_CSUM; |
2459 | |
2460 | /* |
	 * BCM5701 B5 has a bug causing data corruption when using
2462 | * 64-bit DMA reads, which can be terminated early and then |
2463 | * completed later as 32-bit accesses, in combination with |
2464 | * certain bridges. |
2465 | */ |
2466 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && |
2467 | sc->bge_chipid == BGE_CHIPID_BCM5701_B5) |
2468 | mode_ctl |= BGE_MODECTL_FORCE_PCI32; |
2469 | |
2470 | /* |
2471 | * Tell the firmware the driver is running |
2472 | */ |
2473 | if (sc->bge_asf_mode & ASF_STACKUP) |
2474 | mode_ctl |= BGE_MODECTL_STACKUP; |
2475 | |
2476 | CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); |
2477 | |
2478 | /* |
2479 | * Disable memory write invalidate. Apparently it is not supported |
2480 | * properly by these devices. |
2481 | */ |
2482 | PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, |
2483 | PCI_COMMAND_INVALIDATE_ENABLE); |
2484 | |
2485 | #ifdef __brokenalpha__ |
2486 | /* |
	 * Must ensure that we do not cross an 8K (bytes) boundary
2488 | * for DMA reads. Our highest limit is 1K bytes. This is a |
2489 | * restriction on some ALPHA platforms with early revision |
2490 | * 21174 PCI chipsets, such as the AlphaPC 164lx |
2491 | */ |
2492 | PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); |
2493 | #endif |
2494 | |
2495 | /* Set the timer prescaler (always 66MHz) */ |
2496 | CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); |
2497 | |
2498 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
2499 | DELAY(40); /* XXX */ |
2500 | |
2501 | /* Put PHY into ready state */ |
2502 | BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); |
2503 | DELAY(40); |
2504 | } |
2505 | |
2506 | return 0; |
2507 | } |
2508 | |
2509 | static int |
2510 | bge_blockinit(struct bge_softc *sc) |
2511 | { |
2512 | volatile struct bge_rcb *rcb; |
2513 | bus_size_t rcb_addr; |
2514 | struct ifnet *ifp = &sc->ethercom.ec_if; |
2515 | bge_hostaddr taddr; |
2516 | uint32_t dmactl, mimode, val; |
2517 | int i, limit; |
2518 | |
2519 | /* |
2520 | * Initialize the memory window pointer register so that |
2521 | * we can access the first 32K of internal NIC RAM. This will |
2522 | * allow us to set up the TX send ring RCBs and the RX return |
2523 | * ring RCBs, plus other things which live in NIC memory. |
2524 | */ |
2525 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); |
2526 | |
2527 | if (!BGE_IS_5705_PLUS(sc)) { |
2528 | /* 57XX step 33 */ |
2529 | /* Configure mbuf memory pool */ |
2530 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, |
2531 | BGE_BUFFPOOL_1); |
2532 | |
2533 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) |
2534 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); |
2535 | else |
2536 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); |
2537 | |
2538 | /* 57XX step 34 */ |
2539 | /* Configure DMA resource pool */ |
2540 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, |
2541 | BGE_DMA_DESCRIPTORS); |
2542 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); |
2543 | } |
2544 | |
2545 | /* 5718 step 11, 57XX step 35 */ |
2546 | /* |
	 * Configure mbuf pool watermarks. New Broadcom docs strongly
2548 | * recommend these. |
2549 | */ |
2550 | if (BGE_IS_5717_PLUS(sc)) { |
2551 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); |
2552 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); |
2553 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); |
2554 | } else if (BGE_IS_5705_PLUS(sc)) { |
2555 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); |
2556 | |
2557 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
2558 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); |
2559 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); |
2560 | } else { |
2561 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); |
2562 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); |
2563 | } |
2564 | } else { |
2565 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); |
2566 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); |
2567 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); |
2568 | } |
2569 | |
2570 | /* 57XX step 36 */ |
2571 | /* Configure DMA resource watermarks */ |
2572 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); |
2573 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); |
2574 | |
2575 | /* 5718 step 13, 57XX step 38 */ |
2576 | /* Enable buffer manager */ |
2577 | val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN; |
2578 | /* |
2579 | * Change the arbitration algorithm of TXMBUF read request to |
2580 | * round-robin instead of priority based for BCM5719. When |
2581 | * TXFIFO is almost empty, RDMA will hold its request until |
2582 | * TXFIFO is not almost empty. |
2583 | */ |
2584 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) |
2585 | val |= BGE_BMANMODE_NO_TX_UNDERRUN; |
2586 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || |
2587 | sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || |
2588 | sc->bge_chipid == BGE_CHIPID_BCM5720_A0) |
2589 | val |= BGE_BMANMODE_LOMBUF_ATTN; |
2590 | CSR_WRITE_4(sc, BGE_BMAN_MODE, val); |
2591 | |
2592 | /* 57XX step 39 */ |
2593 | /* Poll for buffer manager start indication */ |
2594 | for (i = 0; i < BGE_TIMEOUT * 2; i++) { |
2595 | DELAY(10); |
2596 | if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) |
2597 | break; |
2598 | } |
2599 | |
2600 | if (i == BGE_TIMEOUT * 2) { |
2601 | aprint_error_dev(sc->bge_dev, |
		    "buffer manager failed to start\n");
2603 | return ENXIO; |
2604 | } |
2605 | |
2606 | /* 57XX step 40 */ |
2607 | /* Enable flow-through queues */ |
2608 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); |
2609 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); |
2610 | |
2611 | /* Wait until queue initialization is complete */ |
2612 | for (i = 0; i < BGE_TIMEOUT * 2; i++) { |
2613 | if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) |
2614 | break; |
2615 | DELAY(10); |
2616 | } |
2617 | |
2618 | if (i == BGE_TIMEOUT * 2) { |
2619 | aprint_error_dev(sc->bge_dev, |
		    "flow-through queue init failed\n");
2621 | return ENXIO; |
2622 | } |
2623 | |
2624 | /* |
2625 | * Summary of rings supported by the controller: |
2626 | * |
2627 | * Standard Receive Producer Ring |
2628 | * - This ring is used to feed receive buffers for "standard" |
2629 | * sized frames (typically 1536 bytes) to the controller. |
2630 | * |
2631 | * Jumbo Receive Producer Ring |
2632 | * - This ring is used to feed receive buffers for jumbo sized |
2633 | * frames (i.e. anything bigger than the "standard" frames) |
2634 | * to the controller. |
2635 | * |
2636 | * Mini Receive Producer Ring |
2637 | * - This ring is used to feed receive buffers for "mini" |
2638 | * sized frames to the controller. |
2639 | * - This feature required external memory for the controller |
2640 | * but was never used in a production system. Should always |
2641 | * be disabled. |
2642 | * |
2643 | * Receive Return Ring |
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer, that buffer is moved into a receive return
	 *   ring. The driver is then responsible for passing the
	 *   buffer up to the stack. Many versions of the controller
2648 | * support multiple RR rings. |
2649 | * |
2650 | * Send Ring |
2651 | * - This ring is used for outgoing frames. Many versions of |
2652 | * the controller support multiple send rings. |
2653 | */ |
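
/*
 * Each of these rings is described to the chip by a ring control
 * block (RCB).  A minimal sketch of its layout, paraphrasing the
 * definition in the register header:
 *
 *	struct bge_rcb {
 *		bge_hostaddr	bge_hostaddr;	   ring base in host memory
 *		uint32_t	bge_maxlen_flags;  packed length and flags
 *		uint32_t	bge_nicaddr;	   ring address in NIC RAM
 *	};
 */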
2654 | |
2655 | /* 5718 step 15, 57XX step 41 */ |
2656 | /* Initialize the standard RX ring control block */ |
2657 | rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; |
2658 | BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); |
2659 | /* 5718 step 16 */ |
2660 | if (BGE_IS_57765_PLUS(sc)) { |
2661 | /* |
2662 | * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) |
2663 | * Bits 15-2 : Maximum RX frame size |
		 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2665 | * Bit 0 : Reserved |
2666 | */ |
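		/*
		 * BGE_RCB_MAXLEN_FLAGS(len, flags) is assumed to pack
		 * its arguments as ((len) << 16 | (flags)), so the
		 * "<< 2" below places the frame size in bits 15-2.
		 */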
2667 | rcb->bge_maxlen_flags = |
2668 | BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); |
2669 | } else if (BGE_IS_5705_PLUS(sc)) { |
2670 | /* |
2671 | * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) |
2672 | * Bits 15-2 : Reserved (should be 0) |
2673 | * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled |
2674 | * Bit 0 : Reserved |
2675 | */ |
2676 | rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); |
2677 | } else { |
2678 | /* |
2679 | * Ring size is always XXX entries |
2680 | * Bits 31-16: Maximum RX frame size |
2681 | * Bits 15-2 : Reserved (should be 0) |
2682 | * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled |
2683 | * Bit 0 : Reserved |
2684 | */ |
2685 | rcb->bge_maxlen_flags = |
2686 | BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); |
2687 | } |
2688 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || |
2689 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || |
2690 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
2691 | rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; |
2692 | else |
2693 | rcb->bge_nicaddr = BGE_STD_RX_RINGS; |
2694 | /* Write the standard receive producer ring control block. */ |
2695 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); |
2696 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); |
2697 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); |
2698 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); |
2699 | |
2700 | /* Reset the standard receive producer ring producer index. */ |
2701 | bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); |
2702 | |
2703 | /* 57XX step 42 */ |
2704 | /* |
	 * Initialize the jumbo RX ring control block.
2706 | * We set the 'ring disabled' bit in the flags |
2707 | * field until we're actually ready to start |
2708 | * using this ring (i.e. once we set the MTU |
2709 | * high enough to require it). |
2710 | */ |
2711 | if (BGE_IS_JUMBO_CAPABLE(sc)) { |
2712 | rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; |
2713 | BGE_HOSTADDR(rcb->bge_hostaddr, |
2714 | BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); |
2715 | rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, |
2716 | BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); |
2717 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || |
2718 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || |
2719 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
2720 | rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; |
2721 | else |
2722 | rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; |
2723 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, |
2724 | rcb->bge_hostaddr.bge_addr_hi); |
2725 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, |
2726 | rcb->bge_hostaddr.bge_addr_lo); |
2727 | /* Program the jumbo receive producer ring RCB parameters. */ |
2728 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, |
2729 | rcb->bge_maxlen_flags); |
2730 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); |
2731 | /* Reset the jumbo receive producer ring producer index. */ |
2732 | bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); |
2733 | } |
2734 | |
2735 | /* 57XX step 43 */ |
2736 | /* Disable the mini receive producer ring RCB. */ |
2737 | if (BGE_IS_5700_FAMILY(sc)) { |
2738 | /* Set up dummy disabled mini ring RCB */ |
2739 | rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; |
2740 | rcb->bge_maxlen_flags = |
2741 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); |
2742 | CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, |
2743 | rcb->bge_maxlen_flags); |
2744 | /* Reset the mini receive producer ring producer index. */ |
2745 | bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); |
2746 | |
2747 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
2748 | offsetof(struct bge_ring_data, bge_info), |
2749 | sizeof (struct bge_gib), |
2750 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
2751 | } |
2752 | |
2753 | /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ |
2754 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
2755 | if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || |
2756 | sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || |
2757 | sc->bge_chipid == BGE_CHIPID_BCM5906_A2) |
2758 | CSR_WRITE_4(sc, BGE_ISO_PKT_TX, |
2759 | (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); |
2760 | } |
2761 | /* 5718 step 14, 57XX step 44 */ |
2762 | /* |
2763 | * The BD ring replenish thresholds control how often the |
2764 | * hardware fetches new BD's from the producer rings in host |
2765 | * memory. Setting the value too low on a busy system can |
	 * starve the hardware and reduce the throughput.
2767 | * |
2768 | * Set the BD ring replenish thresholds. The recommended |
2769 | * values are 1/8th the number of descriptors allocated to |
2770 | * each ring, but since we try to avoid filling the entire |
2771 | * ring we set these to the minimal value of 8. This needs to |
2772 | * be done on several of the supported chip revisions anyway, |
2773 | * to work around HW bugs. |
2774 | */ |
2775 | CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8); |
2776 | if (BGE_IS_JUMBO_CAPABLE(sc)) |
2777 | CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8); |
2778 | |
2779 | /* 5718 step 18 */ |
2780 | if (BGE_IS_5717_PLUS(sc)) { |
2781 | CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); |
2782 | CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); |
2783 | } |
2784 | |
2785 | /* 57XX step 45 */ |
2786 | /* |
2787 | * Disable all send rings by setting the 'ring disabled' bit |
2788 | * in the flags field of all the TX send ring control blocks, |
2789 | * located in NIC memory. |
2790 | */ |
2791 | if (BGE_IS_5700_FAMILY(sc)) { |
2792 | /* 5700 to 5704 had 16 send rings. */ |
2793 | limit = BGE_TX_RINGS_EXTSSRAM_MAX; |
2794 | } else if (BGE_IS_5717_PLUS(sc)) { |
2795 | limit = BGE_TX_RINGS_5717_MAX; |
2796 | } else if (BGE_IS_57765_FAMILY(sc)) { |
2797 | limit = BGE_TX_RINGS_57765_MAX; |
2798 | } else |
2799 | limit = 1; |
2800 | rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; |
2801 | for (i = 0; i < limit; i++) { |
2802 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, |
2803 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); |
2804 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); |
2805 | rcb_addr += sizeof(struct bge_rcb); |
2806 | } |
2807 | |
2808 | /* 57XX step 46 and 47 */ |
2809 | /* Configure send ring RCB 0 (we use only the first ring) */ |
2810 | rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; |
2811 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); |
2812 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); |
2813 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); |
2814 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || |
2815 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || |
2816 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
2817 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); |
2818 | else |
2819 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, |
2820 | BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); |
2821 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, |
2822 | BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); |
2823 | |
2824 | /* 57XX step 48 */ |
2825 | /* |
2826 | * Disable all receive return rings by setting the |
	 * 'ring disabled' bit in the flags field of all the receive
2828 | * return ring control blocks, located in NIC memory. |
2829 | */ |
2830 | if (BGE_IS_5717_PLUS(sc)) { |
2831 | /* Should be 17, use 16 until we get an SRAM map. */ |
2832 | limit = 16; |
2833 | } else if (BGE_IS_5700_FAMILY(sc)) |
2834 | limit = BGE_RX_RINGS_MAX; |
2835 | else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || |
2836 | BGE_IS_57765_FAMILY(sc)) |
2837 | limit = 4; |
2838 | else |
2839 | limit = 1; |
2840 | /* Disable all receive return rings */ |
2841 | rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; |
2842 | for (i = 0; i < limit; i++) { |
2843 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); |
2844 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); |
2845 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, |
2846 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, |
2847 | BGE_RCB_FLAG_RING_DISABLED)); |
2848 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); |
2849 | bge_writembx(sc, BGE_MBX_RX_CONS0_LO + |
2850 | (i * (sizeof(uint64_t))), 0); |
2851 | rcb_addr += sizeof(struct bge_rcb); |
2852 | } |
2853 | |
2854 | /* 57XX step 49 */ |
2855 | /* |
2856 | * Set up receive return ring 0. Note that the NIC address |
2857 | * for RX return rings is 0x0. The return rings live entirely |
2858 | * within the host, so the nicaddr field in the RCB isn't used. |
2859 | */ |
2860 | rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; |
2861 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); |
2862 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); |
2863 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); |
2864 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); |
2865 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, |
2866 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); |
2867 | |
2868 | /* 5718 step 24, 57XX step 53 */ |
2869 | /* Set random backoff seed for TX */ |
2870 | CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, |
2871 | (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + |
2872 | CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + |
2873 | CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) & |
2874 | BGE_TX_BACKOFF_SEED_MASK); |
2875 | |
2876 | /* 5718 step 26, 57XX step 55 */ |
2877 | /* Set inter-packet gap */ |
2878 | val = 0x2620; |
2879 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
2880 | val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & |
2881 | (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); |
2882 | CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); |
2883 | |
2884 | /* 5718 step 27, 57XX step 56 */ |
2885 | /* |
2886 | * Specify which ring to use for packets that don't match |
2887 | * any RX rules. |
2888 | */ |
2889 | CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); |
2890 | |
2891 | /* 5718 step 28, 57XX step 57 */ |
2892 | /* |
2893 | * Configure number of RX lists. One interrupt distribution |
2894 | * list, sixteen active lists, one bad frames class. |
2895 | */ |
2896 | CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); |
2897 | |
2898 | /* 5718 step 29, 57XX step 58 */ |
	/* Initialize RX list placement stats mask. */
2900 | if (BGE_IS_575X_PLUS(sc)) { |
2901 | val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK); |
2902 | val &= ~BGE_RXLPSTATCONTROL_DACK_FIX; |
2903 | CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val); |
2904 | } else |
2905 | CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); |
2906 | |
2907 | /* 5718 step 30, 57XX step 59 */ |
2908 | CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); |
2909 | |
2910 | /* 5718 step 33, 57XX step 62 */ |
2911 | /* Disable host coalescing until we get it set up */ |
2912 | CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); |
2913 | |
2914 | /* 5718 step 34, 57XX step 63 */ |
2915 | /* Poll to make sure it's shut down. */ |
2916 | for (i = 0; i < BGE_TIMEOUT * 2; i++) { |
2917 | DELAY(10); |
2918 | if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) |
2919 | break; |
2920 | } |
2921 | |
2922 | if (i == BGE_TIMEOUT * 2) { |
2923 | aprint_error_dev(sc->bge_dev, |
		    "host coalescing engine failed to idle\n");
2925 | return ENXIO; |
2926 | } |
2927 | |
2928 | /* 5718 step 35, 36, 37 */ |
2929 | /* Set up host coalescing defaults */ |
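	/*
	 * Roughly: a coalesced interrupt is raised once either the tick
	 * timer expires or the max-BD count is reached for a ring,
	 * whichever comes first.  The *_INT variants below, written as
	 * 0, would otherwise set the separate thresholds used while an
	 * interrupt is already being serviced.
	 */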
2930 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); |
2931 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); |
2932 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); |
2933 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); |
2934 | if (!(BGE_IS_5705_PLUS(sc))) { |
2935 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); |
2936 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); |
2937 | } |
2938 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); |
2939 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); |
2940 | |
2941 | /* Set up address of statistics block */ |
2942 | if (BGE_IS_5700_FAMILY(sc)) { |
2943 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); |
2944 | CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); |
2945 | CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); |
2946 | CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); |
2947 | CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); |
2948 | } |
2949 | |
2950 | /* 5718 step 38 */ |
2951 | /* Set up address of status block */ |
2952 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); |
2953 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); |
2954 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); |
2955 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); |
2956 | sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; |
2957 | sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; |
2958 | |
2959 | /* Set up status block size. */ |
2960 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && |
2961 | sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { |
2962 | val = BGE_STATBLKSZ_FULL; |
2963 | bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); |
2964 | } else { |
2965 | val = BGE_STATBLKSZ_32BYTE; |
2966 | bzero(&sc->bge_rdata->bge_status_block, 32); |
2967 | } |
2968 | |
2969 | /* 5718 step 39, 57XX step 73 */ |
2970 | /* Turn on host coalescing state machine */ |
2971 | CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); |
2972 | |
2973 | /* 5718 step 40, 57XX step 74 */ |
2974 | /* Turn on RX BD completion state machine and enable attentions */ |
2975 | CSR_WRITE_4(sc, BGE_RBDC_MODE, |
2976 | BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); |
2977 | |
2978 | /* 5718 step 41, 57XX step 75 */ |
2979 | /* Turn on RX list placement state machine */ |
2980 | CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); |
2981 | |
2982 | /* 57XX step 76 */ |
2983 | /* Turn on RX list selector state machine. */ |
2984 | if (!(BGE_IS_5705_PLUS(sc))) |
2985 | CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); |
2986 | |
2987 | val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | |
2988 | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | |
2989 | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | |
2990 | BGE_MACMODE_FRMHDR_DMA_ENB; |
2991 | |
2992 | if (sc->bge_flags & BGEF_FIBER_TBI) |
2993 | val |= BGE_PORTMODE_TBI; |
2994 | else if (sc->bge_flags & BGEF_FIBER_MII) |
2995 | val |= BGE_PORTMODE_GMII; |
2996 | else |
2997 | val |= BGE_PORTMODE_MII; |
2998 | |
2999 | /* 5718 step 42 and 43, 57XX step 77 and 78 */ |
3000 | /* Allow APE to send/receive frames. */ |
3001 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) |
3002 | val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; |
3003 | |
3004 | /* Turn on DMA, clear stats */ |
3005 | CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); |
3006 | /* 5718 step 44 */ |
3007 | DELAY(40); |
3008 | |
3009 | /* 5718 step 45, 57XX step 79 */ |
3010 | /* Set misc. local control, enable interrupts on attentions */ |
3011 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); |
3012 | if (BGE_IS_5717_PLUS(sc)) { |
3013 | CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */ |
3014 | /* 5718 step 46 */ |
3015 | DELAY(100); |
3016 | } |
3017 | |
3018 | /* 57XX step 81 */ |
3019 | /* Turn on DMA completion state machine */ |
3020 | if (!(BGE_IS_5705_PLUS(sc))) |
3021 | CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); |
3022 | |
3023 | /* 5718 step 47, 57XX step 82 */ |
3024 | val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; |
3025 | |
3026 | /* 5718 step 48 */ |
3027 | /* Enable host coalescing bug fix. */ |
3028 | if (BGE_IS_5755_PLUS(sc)) |
3029 | val |= BGE_WDMAMODE_STATUS_TAG_FIX; |
3030 | |
3031 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) |
3032 | val |= BGE_WDMAMODE_BURST_ALL_DATA; |
3033 | |
3034 | /* Turn on write DMA state machine */ |
3035 | CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val); |
3036 | /* 5718 step 49 */ |
3037 | DELAY(40); |
3038 | |
3039 | val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; |
3040 | |
3041 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) |
3042 | val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; |
3043 | |
3044 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || |
3045 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || |
3046 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) |
3047 | val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | |
3048 | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | |
3049 | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; |
3050 | |
3051 | if (sc->bge_flags & BGEF_PCIE) |
3052 | val |= BGE_RDMAMODE_FIFO_LONG_BURST; |
3053 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { |
3054 | if (ifp->if_mtu <= ETHERMTU) |
3055 | val |= BGE_RDMAMODE_JMB_2K_MMRR; |
3056 | } |
3057 | if (sc->bge_flags & BGEF_TSO) |
3058 | val |= BGE_RDMAMODE_TSO4_ENABLE; |
3059 | |
3060 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { |
3061 | val |= CSR_READ_4(sc, BGE_RDMA_MODE) & |
3062 | BGE_RDMAMODE_H2BNC_VLAN_DET; |
3063 | /* |
3064 | * Allow multiple outstanding read requests from |
3065 | * non-LSO read DMA engine. |
3066 | */ |
3067 | val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; |
3068 | } |
3069 | |
3070 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || |
3071 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || |
3072 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || |
3073 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || |
3074 | BGE_IS_57765_PLUS(sc)) { |
3075 | dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL); |
3076 | /* |
3077 | * Adjust tx margin to prevent TX data corruption and |
3078 | * fix internal FIFO overflow. |
3079 | */ |
3080 | if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { |
3081 | dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | |
3082 | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | |
3083 | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); |
3084 | dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | |
3085 | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | |
3086 | BGE_RDMA_RSRVCTRL_TXMRGN_320B; |
3087 | } |
3088 | /* |
3089 | * Enable fix for read DMA FIFO overruns. |
3090 | * The fix is to limit the number of RX BDs |
		 * the hardware would fetch at a time.
3092 | */ |
3093 | CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl | |
3094 | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); |
3095 | } |
3096 | |
3097 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { |
3098 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, |
3099 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | |
3100 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | |
3101 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); |
3102 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { |
3103 | /* |
3104 | * Allow 4KB burst length reads for non-LSO frames. |
3105 | * Enable 512B burst length reads for buffer descriptors. |
3106 | */ |
3107 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, |
3108 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | |
3109 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | |
3110 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); |
3111 | } |
3112 | |
3113 | /* Turn on read DMA state machine */ |
3114 | CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val); |
3115 | /* 5718 step 52 */ |
3116 | delay(40); |
3117 | |
3118 | /* 5718 step 56, 57XX step 84 */ |
3119 | /* Turn on RX data completion state machine */ |
3120 | CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); |
3121 | |
3122 | /* Turn on RX data and RX BD initiator state machine */ |
3123 | CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); |
3124 | |
3125 | /* 57XX step 85 */ |
3126 | /* Turn on Mbuf cluster free state machine */ |
3127 | if (!BGE_IS_5705_PLUS(sc)) |
3128 | CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); |
3129 | |
3130 | /* 5718 step 57, 57XX step 86 */ |
3131 | /* Turn on send data completion state machine */ |
3132 | val = BGE_SDCMODE_ENABLE; |
3133 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
3134 | val |= BGE_SDCMODE_CDELAY; |
3135 | CSR_WRITE_4(sc, BGE_SDC_MODE, val); |
3136 | |
3137 | /* 5718 step 58 */ |
3138 | /* Turn on send BD completion state machine */ |
3139 | CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); |
3140 | |
3141 | /* 57XX step 88 */ |
3142 | /* Turn on RX BD initiator state machine */ |
3143 | CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); |
3144 | |
3145 | /* 5718 step 60, 57XX step 90 */ |
3146 | /* Turn on send data initiator state machine */ |
3147 | if (sc->bge_flags & BGEF_TSO) { |
3148 | /* XXX: magic value from Linux driver */ |
3149 | CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | |
3150 | BGE_SDIMODE_HW_LSO_PRE_DMA); |
3151 | } else |
3152 | CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); |
3153 | |
3154 | /* 5718 step 61, 57XX step 91 */ |
3155 | /* Turn on send BD initiator state machine */ |
3156 | CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); |
3157 | |
3158 | /* 5718 step 62, 57XX step 92 */ |
3159 | /* Turn on send BD selector state machine */ |
3160 | CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); |
3161 | |
3162 | /* 5718 step 31, 57XX step 60 */ |
3163 | CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); |
3164 | /* 5718 step 32, 57XX step 61 */ |
3165 | CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, |
3166 | BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); |
3167 | |
3168 | /* ack/clear link change events */ |
3169 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | |
3170 | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | |
3171 | BGE_MACSTAT_LINK_CHANGED); |
3172 | CSR_WRITE_4(sc, BGE_MI_STS, 0); |
3173 | |
3174 | /* |
3175 | * Enable attention when the link has changed state for |
3176 | * devices that use auto polling. |
3177 | */ |
3178 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
3179 | CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); |
3180 | } else { |
3181 | if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) |
3182 | mimode = BGE_MIMODE_500KHZ_CONST; |
3183 | else |
3184 | mimode = BGE_MIMODE_BASE; |
3185 | /* 5718 step 68. 5718 step 69 (optionally). */ |
3186 | if (BGE_IS_5700_FAMILY(sc) || |
3187 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) { |
3188 | mimode |= BGE_MIMODE_AUTOPOLL; |
3189 | BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); |
3190 | } |
3191 | mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr); |
3192 | CSR_WRITE_4(sc, BGE_MI_MODE, mimode); |
3193 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) |
3194 | CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, |
3195 | BGE_EVTENB_MI_INTERRUPT); |
3196 | } |
3197 | |
3198 | /* |
3199 | * Clear any pending link state attention. |
3200 | * Otherwise some link state change events may be lost until attention |
3201 | * is cleared by bge_intr() -> bge_link_upd() sequence. |
3202 | * It's not necessary on newer BCM chips - perhaps enabling link |
3203 | * state change attentions implies clearing pending attention. |
3204 | */ |
3205 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | |
3206 | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | |
3207 | BGE_MACSTAT_LINK_CHANGED); |
3208 | |
3209 | /* Enable link state change attentions. */ |
3210 | BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); |
3211 | |
3212 | return 0; |
3213 | } |
3214 | |
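/*
 * Look up the name for a chip revision: first try an exact match on
 * the full chip ID, then fall back to the major ASIC revision.
 */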
3215 | static const struct bge_revision * |
3216 | bge_lookup_rev(uint32_t chipid) |
3217 | { |
3218 | const struct bge_revision *br; |
3219 | |
3220 | for (br = bge_revisions; br->br_name != NULL; br++) { |
3221 | if (br->br_chipid == chipid) |
3222 | return br; |
3223 | } |
3224 | |
3225 | for (br = bge_majorrevs; br->br_name != NULL; br++) { |
3226 | if (br->br_chipid == BGE_ASICREV(chipid)) |
3227 | return br; |
3228 | } |
3229 | |
3230 | return NULL; |
3231 | } |
3232 | |
3233 | static const struct bge_product * |
3234 | bge_lookup(const struct pci_attach_args *pa) |
3235 | { |
3236 | const struct bge_product *bp; |
3237 | |
3238 | for (bp = bge_products; bp->bp_name != NULL; bp++) { |
3239 | if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && |
3240 | PCI_PRODUCT(pa->pa_id) == bp->bp_product) |
3241 | return bp; |
3242 | } |
3243 | |
3244 | return NULL; |
3245 | } |
3246 | |
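/*
 * Read the chip ID from PCI config space.  Most controllers encode it
 * in the upper bits of the misc. control register; devices that report
 * BGE_ASICREV_USE_PRODID_REG there expose the real ID through one of
 * the product-specific ASICREV registers instead.
 */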
3247 | static uint32_t |
3248 | bge_chipid(const struct pci_attach_args *pa) |
3249 | { |
3250 | uint32_t id; |
3251 | |
3252 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) |
3253 | >> BGE_PCIMISCCTL_ASICREV_SHIFT; |
3254 | |
3255 | if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { |
3256 | switch (PCI_PRODUCT(pa->pa_id)) { |
3257 | case PCI_PRODUCT_BROADCOM_BCM5717: |
3258 | case PCI_PRODUCT_BROADCOM_BCM5718: |
3259 | case PCI_PRODUCT_BROADCOM_BCM5719: |
3260 | case PCI_PRODUCT_BROADCOM_BCM5720: |
3261 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, |
3262 | BGE_PCI_GEN2_PRODID_ASICREV); |
3263 | break; |
3264 | case PCI_PRODUCT_BROADCOM_BCM57761: |
3265 | case PCI_PRODUCT_BROADCOM_BCM57762: |
3266 | case PCI_PRODUCT_BROADCOM_BCM57765: |
3267 | case PCI_PRODUCT_BROADCOM_BCM57766: |
3268 | case PCI_PRODUCT_BROADCOM_BCM57781: |
3269 | case PCI_PRODUCT_BROADCOM_BCM57785: |
3270 | case PCI_PRODUCT_BROADCOM_BCM57791: |
3271 | case PCI_PRODUCT_BROADCOM_BCM57795: |
3272 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, |
3273 | BGE_PCI_GEN15_PRODID_ASICREV); |
3274 | break; |
3275 | default: |
3276 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, |
3277 | BGE_PCI_PRODID_ASICREV); |
3278 | break; |
3279 | } |
3280 | } |
3281 | |
3282 | return id; |
3283 | } |
3284 | |
3285 | /* |
3286 | * Return true if MSI can be used with this device. |
3287 | */ |
3288 | static int |
3289 | bge_can_use_msi(struct bge_softc *sc) |
3290 | { |
3291 | int can_use_msi = 0; |
3292 | |
3293 | switch (BGE_ASICREV(sc->bge_chipid)) { |
3294 | case BGE_ASICREV_BCM5714_A0: |
3295 | case BGE_ASICREV_BCM5714: |
3296 | /* |
3297 | * Apparently, MSI doesn't work when these chips are |
3298 | * configured in single-port mode. |
3299 | */ |
3300 | break; |
3301 | case BGE_ASICREV_BCM5750: |
3302 | if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX && |
3303 | BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX) |
3304 | can_use_msi = 1; |
3305 | break; |
3306 | default: |
3307 | if (BGE_IS_575X_PLUS(sc)) |
3308 | can_use_msi = 1; |
3309 | } |
3310 | return (can_use_msi); |
3311 | } |
3312 | |
3313 | /* |
3314 | * Probe for a Broadcom chip. Check the PCI vendor and device IDs |
3315 | * against our list and return its name if we find a match. Note |
3316 | * that since the Broadcom controller contains VPD support, we |
3317 | * can get the device name string from the controller itself instead |
3318 | * of the compiled-in string. This is a little slow, but it guarantees |
3319 | * we'll always announce the right product name. |
3320 | */ |
3321 | static int |
3322 | bge_probe(device_t parent, cfdata_t match, void *aux) |
3323 | { |
3324 | struct pci_attach_args *pa = (struct pci_attach_args *)aux; |
3325 | |
3326 | if (bge_lookup(pa) != NULL) |
3327 | return 1; |
3328 | |
3329 | return 0; |
3330 | } |
3331 | |
3332 | static void |
3333 | bge_attach(device_t parent, device_t self, void *aux) |
3334 | { |
3335 | struct bge_softc *sc = device_private(self); |
3336 | struct pci_attach_args *pa = aux; |
3337 | prop_dictionary_t dict; |
3338 | const struct bge_product *bp; |
3339 | const struct bge_revision *br; |
3340 | pci_chipset_tag_t pc; |
3341 | int counts[PCI_INTR_TYPE_SIZE]; |
3342 | pci_intr_type_t intr_type, max_type; |
3343 | const char *intrstr = NULL; |
3344 | uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5; |
3345 | uint32_t command; |
3346 | struct ifnet *ifp; |
3347 | uint32_t misccfg, mimode; |
	void *kva;
3349 | u_char eaddr[ETHER_ADDR_LEN]; |
3350 | pcireg_t memtype, subid, reg; |
3351 | bus_addr_t memaddr; |
3352 | uint32_t pm_ctl; |
3353 | bool no_seeprom; |
3354 | int capmask; |
3355 | int mii_flags; |
3356 | int map_flags; |
3357 | char intrbuf[PCI_INTRSTR_LEN]; |
3358 | |
3359 | bp = bge_lookup(pa); |
3360 | KASSERT(bp != NULL); |
3361 | |
3362 | sc->sc_pc = pa->pa_pc; |
3363 | sc->sc_pcitag = pa->pa_tag; |
3364 | sc->bge_dev = self; |
3365 | |
3366 | sc->bge_pa = *pa; |
3367 | pc = sc->sc_pc; |
3368 | subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); |
3369 | |
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s\n", bp->bp_name);
3372 | |
3373 | /* |
3374 | * Map control/status registers. |
3375 | */ |
	DPRINTFN(5, ("Map control/status regs\n"));
3377 | command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); |
3378 | command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; |
3379 | pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); |
3380 | command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); |
3381 | |
3382 | if (!(command & PCI_COMMAND_MEM_ENABLE)) { |
		aprint_error_dev(sc->bge_dev,
		    "failed to enable memory mapping!\n");
3385 | return; |
3386 | } |
3387 | |
	DPRINTFN(5, ("pci_mem_find\n"));
3389 | memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); |
3390 | switch (memtype) { |
3391 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
3392 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
3393 | #if 0 |
3394 | if (pci_mapreg_map(pa, BGE_PCI_BAR0, |
3395 | memtype, 0, &sc->bge_btag, &sc->bge_bhandle, |
3396 | &memaddr, &sc->bge_bsize) == 0) |
3397 | break; |
3398 | #else |
3399 | /* |
3400 | * Workaround for PCI prefetchable bit. Some BCM5717-5720 based |
3401 | * system get NMI on boot (PR#48451). This problem might not be |
3402 | * the driver's bug but our PCI common part's bug. Until we |
3403 | * find a real reason, we ignore the prefetchable bit. |
3404 | */ |
3405 | if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0, |
3406 | memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) { |
3407 | map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; |
3408 | if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize, |
3409 | map_flags, &sc->bge_bhandle) == 0) { |
3410 | sc->bge_btag = pa->pa_memt; |
3411 | break; |
3412 | } |
3413 | } |
3414 | #endif |
3415 | default: |
		aprint_error_dev(sc->bge_dev, "can't find mem space\n");
3417 | return; |
3418 | } |
3419 | |
3420 | /* Save various chip information. */ |
3421 | sc->bge_chipid = bge_chipid(pa); |
3422 | sc->bge_phy_addr = bge_phy_addr(sc); |
3423 | |
3424 | if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, |
3425 | &sc->bge_pciecap, NULL) != 0) |
3426 | || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) { |
3427 | /* PCIe */ |
3428 | sc->bge_flags |= BGEF_PCIE; |
3429 | /* Extract supported maximum payload size. */ |
3430 | reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
3431 | sc->bge_pciecap + PCIE_DCAP); |
3432 | sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD); |
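		/*
		 * The 5719 and 5720 apparently limit the maximum PCIe
		 * read request size to 2048 bytes; other PCIe parts
		 * can use 4096.
		 */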
3433 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || |
3434 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
3435 | sc->bge_expmrq = 2048; |
3436 | else |
3437 | sc->bge_expmrq = 4096; |
3438 | bge_set_max_readrq(sc); |
3439 | } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & |
3440 | BGE_PCISTATE_PCI_BUSMODE) == 0) { |
3441 | /* PCI-X */ |
3442 | sc->bge_flags |= BGEF_PCIX; |
3443 | if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, |
3444 | &sc->bge_pcixcap, NULL) == 0) |
			aprint_error_dev(sc->bge_dev,
			    "unable to find PCIX capability\n");
3447 | } |
3448 | |
3449 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) { |
3450 | /* |
3451 | * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) |
3452 | * can clobber the chip's PCI config-space power control |
3453 | * registers, leaving the card in D3 powersave state. We do |
3454 | * not have memory-mapped registers in this state, so force |
3455 | * device into D0 state before starting initialization. |
3456 | */ |
3457 | pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); |
3458 | pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); |
		pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
3460 | pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); |
		DELAY(1000);	/* 27 usec is allegedly sufficient */
3462 | } |
3463 | |
3464 | /* Save chipset family. */ |
3465 | switch (BGE_ASICREV(sc->bge_chipid)) { |
3466 | case BGE_ASICREV_BCM5717: |
3467 | case BGE_ASICREV_BCM5719: |
3468 | case BGE_ASICREV_BCM5720: |
3469 | sc->bge_flags |= BGEF_5717_PLUS; |
3470 | /* FALLTHROUGH */ |
3471 | case BGE_ASICREV_BCM57765: |
3472 | case BGE_ASICREV_BCM57766: |
3473 | if (!BGE_IS_5717_PLUS(sc)) |
3474 | sc->bge_flags |= BGEF_57765_FAMILY; |
3475 | sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS | |
3476 | BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE; |
3477 | /* Jumbo frame on BCM5719 A0 does not work. */ |
3478 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && |
3479 | (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) |
3480 | sc->bge_flags &= ~BGEF_JUMBO_CAPABLE; |
3481 | break; |
3482 | case BGE_ASICREV_BCM5755: |
3483 | case BGE_ASICREV_BCM5761: |
3484 | case BGE_ASICREV_BCM5784: |
3485 | case BGE_ASICREV_BCM5785: |
3486 | case BGE_ASICREV_BCM5787: |
3487 | case BGE_ASICREV_BCM57780: |
3488 | sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS; |
3489 | break; |
3490 | case BGE_ASICREV_BCM5700: |
3491 | case BGE_ASICREV_BCM5701: |
3492 | case BGE_ASICREV_BCM5703: |
3493 | case BGE_ASICREV_BCM5704: |
3494 | sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE; |
3495 | break; |
3496 | case BGE_ASICREV_BCM5714_A0: |
3497 | case BGE_ASICREV_BCM5780: |
3498 | case BGE_ASICREV_BCM5714: |
3499 | sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE; |
3500 | /* FALLTHROUGH */ |
3501 | case BGE_ASICREV_BCM5750: |
3502 | case BGE_ASICREV_BCM5752: |
3503 | case BGE_ASICREV_BCM5906: |
3504 | sc->bge_flags |= BGEF_575X_PLUS; |
3505 | /* FALLTHROUGH */ |
3506 | case BGE_ASICREV_BCM5705: |
3507 | sc->bge_flags |= BGEF_5705_PLUS; |
3508 | break; |
3509 | } |
3510 | |
3511 | /* Identify chips with APE processor. */ |
3512 | switch (BGE_ASICREV(sc->bge_chipid)) { |
3513 | case BGE_ASICREV_BCM5717: |
3514 | case BGE_ASICREV_BCM5719: |
3515 | case BGE_ASICREV_BCM5720: |
3516 | case BGE_ASICREV_BCM5761: |
3517 | sc->bge_flags |= BGEF_APE; |
3518 | break; |
3519 | } |
3520 | |
3521 | /* |
3522 | * The 40bit DMA bug applies to the 5714/5715 controllers and is |
3523 | * not actually a MAC controller bug but an issue with the embedded |
3524 | * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. |
3525 | */ |
3526 | if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0)) |
3527 | sc->bge_flags |= BGEF_40BIT_BUG; |
3528 | |
3529 | /* Chips with APE need BAR2 access for APE registers/memory. */ |
3530 | if ((sc->bge_flags & BGEF_APE) != 0) { |
3531 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2); |
3532 | #if 0 |
3533 | if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0, |
3534 | &sc->bge_apetag, &sc->bge_apehandle, NULL, |
3535 | &sc->bge_apesize)) { |
3536 | aprint_error_dev(sc->bge_dev, |
3537 | "couldn't map BAR2 memory\n" ); |
3538 | return; |
3539 | } |
3540 | #else |
3541 | /* |
3542 | * Workaround for PCI prefetchable bit. Some BCM5717-5720 based |
3543 | * system get NMI on boot (PR#48451). This problem might not be |
3544 | * the driver's bug but our PCI common part's bug. Until we |
3545 | * find a real reason, we ignore the prefetchable bit. |
3546 | */ |
3547 | if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2, |
3548 | memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) { |
3549 | aprint_error_dev(sc->bge_dev, |
3550 | "couldn't map BAR2 memory\n" ); |
3551 | return; |
3552 | } |
3553 | |
3554 | map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; |
3555 | if (bus_space_map(pa->pa_memt, memaddr, |
3556 | sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) { |
3557 | aprint_error_dev(sc->bge_dev, |
3558 | "couldn't map BAR2 memory\n" ); |
3559 | return; |
3560 | } |
3561 | sc->bge_apetag = pa->pa_memt; |
3562 | #endif |
3563 | |
3564 | /* Enable APE register/memory access by host driver. */ |
3565 | reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); |
3566 | reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | |
3567 | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | |
3568 | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; |
3569 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg); |
3570 | |
3571 | bge_ape_lock_init(sc); |
3572 | bge_ape_read_fw_ver(sc); |
3573 | } |
3574 | |
	/* Identify the chips that use a CPMU. */
3576 | if (BGE_IS_5717_PLUS(sc) || |
3577 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || |
3578 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || |
3579 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || |
3580 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) |
3581 | sc->bge_flags |= BGEF_CPMU_PRESENT; |
3582 | |
3583 | /* Set MI_MODE */ |
3584 | mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr); |
3585 | if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) |
3586 | mimode |= BGE_MIMODE_500KHZ_CONST; |
3587 | else |
3588 | mimode |= BGE_MIMODE_BASE; |
3589 | CSR_WRITE_4(sc, BGE_MI_MODE, mimode); |
3590 | |
3591 | /* |
3592 | * When using the BCM5701 in PCI-X mode, data corruption has |
3593 | * been observed in the first few bytes of some received packets. |
3594 | * Aligning the packet buffer in memory eliminates the corruption. |
3595 | * Unfortunately, this misaligns the packet payloads. On platforms |
3596 | * which do not support unaligned accesses, we will realign the |
3597 | * payloads by copying the received packets. |
3598 | */ |
3599 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && |
3600 | sc->bge_flags & BGEF_PCIX) |
3601 | sc->bge_flags |= BGEF_RX_ALIGNBUG; |
3602 | |
3603 | if (BGE_IS_5700_FAMILY(sc)) |
3604 | sc->bge_flags |= BGEF_JUMBO_CAPABLE; |
3605 | |
3606 | misccfg = CSR_READ_4(sc, BGE_MISC_CFG); |
3607 | misccfg &= BGE_MISCCFG_BOARD_ID_MASK; |
3608 | |
3609 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && |
3610 | (misccfg == BGE_MISCCFG_BOARD_ID_5788 || |
3611 | misccfg == BGE_MISCCFG_BOARD_ID_5788M)) |
3612 | sc->bge_flags |= BGEF_IS_5788; |
3613 | |
3614 | /* |
3615 | * Some controllers seem to require a special firmware to use |
3616 | * TSO. But the firmware is not available to FreeBSD and Linux |
3617 | * claims that the TSO performed by the firmware is slower than |
3618 | * hardware based TSO. Moreover the firmware based TSO has one |
3619 | * known bug which can't handle TSO if ethernet header + IP/TCP |
3620 | * header is greater than 80 bytes. The workaround for the TSO |
3621 | * bug exist but it seems it's too expensive than not using |
3622 | * TSO at all. Some hardwares also have the TSO bug so limit |
3623 | * the TSO to the controllers that are not affected TSO issues |
3624 | * (e.g. 5755 or higher). |
3625 | */ |
3626 | if (BGE_IS_5755_PLUS(sc)) { |
3627 | /* |
3628 | * BCM5754 and BCM5787 shares the same ASIC id so |
3629 | * explicit device id check is required. |
3630 | */ |
3631 | if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && |
3632 | (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) |
3633 | sc->bge_flags |= BGEF_TSO; |
3634 | } |
3635 | |
3636 | capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */ |
3637 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && |
3638 | (misccfg == 0x4000 || misccfg == 0x8000)) || |
3639 | (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && |
3640 | PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && |
3641 | (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || |
3642 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || |
3643 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || |
3644 | (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && |
3645 | (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || |
3646 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || |
3647 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || |
3648 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || |
3649 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || |
3650 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || |
3651 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
3652 | /* These chips are 10/100 only. */ |
3653 | capmask &= ~BMSR_EXTSTAT; |
3654 | sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; |
3655 | } |
3656 | |
3657 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || |
3658 | (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && |
3659 | (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && |
3660 | sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) |
3661 | sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; |
3662 | |
3663 | /* Set various PHY bug flags. */ |
3664 | if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || |
3665 | sc->bge_chipid == BGE_CHIPID_BCM5701_B0) |
3666 | sc->bge_phy_flags |= BGEPHYF_CRC_BUG; |
3667 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || |
3668 | BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) |
3669 | sc->bge_phy_flags |= BGEPHYF_ADC_BUG; |
3670 | if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) |
3671 | sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG; |
3672 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || |
3673 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && |
3674 | PCI_VENDOR(subid) == PCI_VENDOR_DELL) |
3675 | sc->bge_phy_flags |= BGEPHYF_NO_3LED; |
3676 | if (BGE_IS_5705_PLUS(sc) && |
3677 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && |
3678 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && |
3679 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && |
3680 | !BGE_IS_57765_PLUS(sc)) { |
3681 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || |
3682 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || |
3683 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || |
3684 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { |
3685 | if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && |
3686 | PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) |
3687 | sc->bge_phy_flags |= BGEPHYF_JITTER_BUG; |
3688 | if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) |
3689 | sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM; |
3690 | } else |
3691 | sc->bge_phy_flags |= BGEPHYF_BER_BUG; |
3692 | } |
3693 | |
3694 | /* |
3695 | * SEEPROM check. |
3696 | * First check if firmware knows we do not have SEEPROM. |
3697 | */ |
	if (prop_dictionary_get_bool(device_properties(self),
	    "without-seeprom", &no_seeprom) && no_seeprom)
3700 | sc->bge_flags |= BGEF_NO_EEPROM; |
3701 | |
3702 | else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
3703 | sc->bge_flags |= BGEF_NO_EEPROM; |
3704 | |
3705 | /* Now check the 'ROM failed' bit on the RX CPU */ |
3706 | else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) |
3707 | sc->bge_flags |= BGEF_NO_EEPROM; |
3708 | |
3709 | sc->bge_asf_mode = 0; |
3710 | /* No ASF if APE present. */ |
3711 | if ((sc->bge_flags & BGEF_APE) == 0) { |
3712 | if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == |
3713 | BGE_SRAM_DATA_SIG_MAGIC)) { |
3714 | if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & |
3715 | BGE_HWCFG_ASF) { |
3716 | sc->bge_asf_mode |= ASF_ENABLE; |
3717 | sc->bge_asf_mode |= ASF_STACKUP; |
3718 | if (BGE_IS_575X_PLUS(sc)) |
3719 | sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; |
3720 | } |
3721 | } |
3722 | } |
3723 | |
	/* MSI-X will be used in the future */
3725 | counts[PCI_INTR_TYPE_MSI] = 1; |
3726 | counts[PCI_INTR_TYPE_INTX] = 1; |
3727 | /* Check MSI capability */ |
3728 | if (bge_can_use_msi(sc) != 0) { |
3729 | max_type = PCI_INTR_TYPE_MSI; |
3730 | sc->bge_flags |= BGEF_MSI; |
3731 | } else |
3732 | max_type = PCI_INTR_TYPE_INTX; |
3733 | |
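	/*
	 * Allocate and establish the interrupt.  If establishing MSI
	 * fails, release it and retry with INTx.
	 */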
3734 | alloc_retry: |
3735 | if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) { |
		aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n");
3737 | return; |
3738 | } |
3739 | |
	DPRINTFN(5, ("pci_intr_string\n"));
3741 | intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf, |
3742 | sizeof(intrbuf)); |
	DPRINTFN(5, ("pci_intr_establish\n"));
3744 | sc->bge_intrhand = pci_intr_establish(pc, sc->bge_pihp[0], IPL_NET, |
3745 | bge_intr, sc); |
3746 | if (sc->bge_intrhand == NULL) { |
3747 | intr_type = pci_intr_type(pc, sc->bge_pihp[0]); |
		aprint_error_dev(sc->bge_dev, "unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
3750 | pci_intr_release(pc, sc->bge_pihp, 1); |
3751 | switch (intr_type) { |
3752 | case PCI_INTR_TYPE_MSI: |
3753 | /* The next try is for INTx: Disable MSI */ |
3754 | max_type = PCI_INTR_TYPE_INTX; |
3755 | counts[PCI_INTR_TYPE_INTX] = 1; |
3756 | sc->bge_flags &= ~BGEF_MSI; |
3757 | goto alloc_retry; |
3758 | case PCI_INTR_TYPE_INTX: |
3759 | default: |
3760 | /* See below */ |
3761 | break; |
3762 | } |
3763 | } |
3764 | |
3765 | if (sc->bge_intrhand == NULL) { |
		aprint_error_dev(sc->bge_dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
3769 | return; |
3770 | } |
	aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
3772 | |
3773 | /* |
3774 | * All controllers except BCM5700 supports tagged status but |
3775 | * we use tagged status only for MSI case on BCM5717. Otherwise |
3776 | * MSI on BCM5717 does not work. |
3777 | */ |
3778 | if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGEF_MSI) |
3779 | sc->bge_flags |= BGEF_TAGGED_STATUS; |
3780 | |
3781 | /* |
3782 | * Reset NVRAM before bge_reset(). It's required to acquire NVRAM |
3783 | * lock in bge_reset(). |
3784 | */ |
3785 | CSR_WRITE_4(sc, BGE_EE_ADDR, |
3786 | BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); |
3787 | delay(1000); |
3788 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); |
3789 | |
3790 | bge_stop_fw(sc); |
3791 | bge_sig_pre_reset(sc, BGE_RESET_START); |
3792 | if (bge_reset(sc)) |
		aprint_error_dev(sc->bge_dev, "chip reset failed\n");
3794 | |
3795 | /* |
3796 | * Read the hardware config word in the first 32k of NIC internal |
3797 | * memory, or fall back to the config word in the EEPROM. |
3798 | * Note: on some BCM5700 cards, this value appears to be unset. |
3799 | */ |
3800 | hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0; |
3801 | if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == |
3802 | BGE_SRAM_DATA_SIG_MAGIC) { |
3803 | uint32_t tmp; |
3804 | |
3805 | hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); |
3806 | tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >> |
3807 | BGE_SRAM_DATA_VER_SHIFT; |
3808 | if ((0 < tmp) && (tmp < 0x100)) |
3809 | hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2); |
3810 | if (sc->bge_flags & BGEF_PCIE) |
3811 | hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3); |
3812 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) |
3813 | hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4); |
3814 | if (BGE_IS_5717_PLUS(sc)) |
3815 | hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5); |
3816 | } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) { |
3817 | bge_read_eeprom(sc, (void *)&hwcfg, |
3818 | BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); |
3819 | hwcfg = be32toh(hwcfg); |
3820 | } |
	aprint_normal_dev(sc->bge_dev,
	    "HW config %08x, %08x, %08x, %08x, %08x\n",
	    hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5);
3824 | |
3825 | bge_sig_legacy(sc, BGE_RESET_START); |
3826 | bge_sig_post_reset(sc, BGE_RESET_START); |
3827 | |
3828 | if (bge_chipinit(sc)) { |
		aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
3830 | bge_release_resources(sc); |
3831 | return; |
3832 | } |
3833 | |
3834 | /* |
3835 | * Get station address from the EEPROM. |
3836 | */ |
3837 | if (bge_get_eaddr(sc, eaddr)) { |
		aprint_error_dev(sc->bge_dev,
		    "failed to read station address\n");
3840 | bge_release_resources(sc); |
3841 | return; |
3842 | } |
3843 | |
3844 | br = bge_lookup_rev(sc->bge_chipid); |
3845 | |
3846 | if (br == NULL) { |
		aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
		    sc->bge_chipid);
	} else {
		aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
		    br->br_name, sc->bge_chipid);
	}
	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
3854 | |
3855 | /* Allocate the general information block and ring buffers. */ |
3856 | if (pci_dma64_available(pa)) |
3857 | sc->bge_dmatag = pa->pa_dmat64; |
3858 | else |
3859 | sc->bge_dmatag = pa->pa_dmat; |
3860 | |
3861 | /* 40bit DMA workaround */ |
3862 | if (sizeof(bus_addr_t) > 4) { |
3863 | if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) { |
3864 | bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */ |
3865 | |
3866 | if (bus_dmatag_subregion(olddmatag, 0, |
3867 | (bus_addr_t)(1ULL << 40), &(sc->bge_dmatag), |
3868 | BUS_DMA_NOWAIT) != 0) { |
				aprint_error_dev(self,
				    "WARNING: failed to restrict dma range,"
				    " falling back to parent bus dma range\n");
3872 | sc->bge_dmatag = olddmatag; |
3873 | } |
3874 | } |
3875 | } |
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
3877 | if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), |
3878 | PAGE_SIZE, 0, &sc->bge_ring_seg, 1, |
3879 | &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
3881 | return; |
3882 | } |
	DPRINTFN(5, ("bus_dmamem_map\n"));
3884 | if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, |
3885 | sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva, |
3886 | BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%zu bytes)\n",
3889 | sizeof(struct bge_ring_data)); |
3890 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, |
3891 | sc->bge_ring_rseg); |
3892 | return; |
3893 | } |
	DPRINTFN(5, ("bus_dmamap_create\n"));
3895 | if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, |
3896 | sizeof(struct bge_ring_data), 0, |
3897 | BUS_DMA_NOWAIT, &sc->bge_ring_map)) { |
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
3899 | bus_dmamem_unmap(sc->bge_dmatag, kva, |
3900 | sizeof(struct bge_ring_data)); |
3901 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, |
3902 | sc->bge_ring_rseg); |
3903 | return; |
3904 | } |
	DPRINTFN(5, ("bus_dmamap_load\n"));
3906 | if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, |
3907 | sizeof(struct bge_ring_data), NULL, |
3908 | BUS_DMA_NOWAIT)) { |
3909 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); |
3910 | bus_dmamem_unmap(sc->bge_dmatag, kva, |
3911 | sizeof(struct bge_ring_data)); |
3912 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, |
3913 | sc->bge_ring_rseg); |
3914 | return; |
3915 | } |
3916 | |
	DPRINTFN(5, ("bzero\n"));
3918 | sc->bge_rdata = (struct bge_ring_data *)kva; |
3919 | |
3920 | memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); |
3921 | |
3922 | /* Try to allocate memory for jumbo buffers. */ |
3923 | if (BGE_IS_JUMBO_CAPABLE(sc)) { |
3924 | if (bge_alloc_jumbo_mem(sc)) { |
			aprint_error_dev(sc->bge_dev,
			    "jumbo buffer allocation failed\n");
3927 | } else |
3928 | sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; |
3929 | } |
3930 | |
3931 | /* Set default tuneable values. */ |
3932 | sc->bge_stat_ticks = BGE_TICKS_PER_SEC; |
3933 | sc->bge_rx_coal_ticks = 150; |
3934 | sc->bge_rx_max_coal_bds = 64; |
3935 | sc->bge_tx_coal_ticks = 300; |
3936 | sc->bge_tx_max_coal_bds = 400; |
3937 | if (BGE_IS_5705_PLUS(sc)) { |
3938 | sc->bge_tx_coal_ticks = (12 * 5); |
3939 | sc->bge_tx_max_coal_bds = (12 * 5); |
		aprint_verbose_dev(sc->bge_dev,
		    "setting short Tx thresholds\n");
3942 | } |
3943 | |
3944 | if (BGE_IS_5717_PLUS(sc)) |
3945 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; |
3946 | else if (BGE_IS_5705_PLUS(sc)) |
3947 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; |
3948 | else |
3949 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; |
3950 | |
3951 | /* Set up ifnet structure */ |
3952 | ifp = &sc->ethercom.ec_if; |
3953 | ifp->if_softc = sc; |
3954 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
3955 | ifp->if_ioctl = bge_ioctl; |
3956 | ifp->if_stop = bge_stop; |
3957 | ifp->if_start = bge_start; |
3958 | ifp->if_init = bge_init; |
3959 | ifp->if_watchdog = bge_watchdog; |
3960 | IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); |
3961 | IFQ_SET_READY(&ifp->if_snd); |
	DPRINTFN(5, ("strcpy if_xname\n"));
3963 | strcpy(ifp->if_xname, device_xname(sc->bge_dev)); |
3964 | |
3965 | if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) |
3966 | sc->ethercom.ec_if.if_capabilities |= |
3967 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; |
3968 | #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ |
3969 | sc->ethercom.ec_if.if_capabilities |= |
3970 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
3971 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
3972 | #endif |
3973 | sc->ethercom.ec_capabilities |= |
3974 | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; |
3975 | |
3976 | if (sc->bge_flags & BGEF_TSO) |
3977 | sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; |
3978 | |
3979 | /* |
3980 | * Do MII setup. |
3981 | */ |
3982 | DPRINTFN(5, ("mii setup\n" )); |
3983 | sc->bge_mii.mii_ifp = ifp; |
3984 | sc->bge_mii.mii_readreg = bge_miibus_readreg; |
3985 | sc->bge_mii.mii_writereg = bge_miibus_writereg; |
3986 | sc->bge_mii.mii_statchg = bge_miibus_statchg; |
3987 | |
3988 | /* |
3989 | * Figure out what sort of media we have by checking the hardware |
3990 | * config word. Note: on some BCM5700 cards, this value appears to be |
3991 | * unset. If that's the case, we have to rely on identifying the NIC |
3992 | * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41. |
3993 | * The SysKonnect SK-9D41 is a 1000baseSX card. |
3994 | */ |
3995 | if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 || |
3996 | (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { |
3997 | if (BGE_IS_5705_PLUS(sc)) { |
3998 | sc->bge_flags |= BGEF_FIBER_MII; |
3999 | sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; |
4000 | } else |
4001 | sc->bge_flags |= BGEF_FIBER_TBI; |
4002 | } |
4003 | |
4004 | /* Set bge_phy_flags before prop_dictionary_set_uint32() */ |
4005 | if (BGE_IS_JUMBO_CAPABLE(sc)) |
4006 | sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE; |
4007 | |
4008 | /* set phyflags and chipid before mii_attach() */ |
4009 | dict = device_properties(self); |
	prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
4012 | |
4013 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
4014 | ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, |
4015 | bge_ifmedia_sts); |
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
4019 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); |
4020 | ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); |
4021 | /* Pretend the user requested this setting */ |
4022 | sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; |
4023 | } else { |
4024 | /* |
4025 | * Do transceiver setup and tell the firmware the |
4026 | * driver is down so we can try to get access the |
4027 | * probe if ASF is running. Retry a couple of times |
4028 | * if we get a conflict with the ASF firmware accessing |
4029 | * the PHY. |
4030 | */ |
4031 | BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
4032 | bge_asf_driver_up(sc); |
4033 | |
4034 | ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, |
4035 | bge_ifmedia_sts); |
4036 | mii_flags = MIIF_DOPAUSE; |
4037 | if (sc->bge_flags & BGEF_FIBER_MII) |
4038 | mii_flags |= MIIF_HAVEFIBER; |
4039 | mii_attach(sc->bge_dev, &sc->bge_mii, capmask, sc->bge_phy_addr, |
4040 | MII_OFFSET_ANY, mii_flags); |
4041 | |
4042 | if (LIST_EMPTY(&sc->bge_mii.mii_phys)) { |
			aprint_error_dev(sc->bge_dev, "no PHY found!\n");
4044 | ifmedia_add(&sc->bge_mii.mii_media, |
4045 | IFM_ETHER|IFM_MANUAL, 0, NULL); |
4046 | ifmedia_set(&sc->bge_mii.mii_media, |
4047 | IFM_ETHER|IFM_MANUAL); |
4048 | } else |
4049 | ifmedia_set(&sc->bge_mii.mii_media, |
4050 | IFM_ETHER|IFM_AUTO); |
4051 | |
4052 | /* |
4053 | * Now tell the firmware we are going up after probing the PHY |
4054 | */ |
4055 | if (sc->bge_asf_mode & ASF_STACKUP) |
4056 | BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
4057 | } |
4058 | |
4059 | /* |
4060 | * Call MI attach routine. |
4061 | */ |
	DPRINTFN(5, ("if_attach\n"));
4063 | if_attach(ifp); |
	DPRINTFN(5, ("ether_ifattach\n"));
4065 | ether_ifattach(ifp, eaddr); |
4066 | ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb); |
4067 | rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), |
4068 | RND_TYPE_NET, RND_FLAG_DEFAULT); |
4069 | #ifdef BGE_EVENT_COUNTERS |
4070 | /* |
4071 | * Attach event counters. |
4072 | */ |
	evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->bge_dev), "intr");
	evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "tx_xoff");
	evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "tx_xon");
	evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "rx_xoff");
	evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "rx_xon");
	evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "rx_macctl");
	evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "xoffentered");
4087 | #endif /* BGE_EVENT_COUNTERS */ |
	DPRINTFN(5, ("callout_init\n"));
4089 | callout_init(&sc->bge_timeout, 0); |
4090 | |
4091 | if (pmf_device_register(self, NULL, NULL)) |
4092 | pmf_class_network_register(self, ifp); |
4093 | else |
		aprint_error_dev(self, "couldn't establish power handler\n");
4095 | |
4096 | bge_sysctl_init(sc); |
4097 | |
4098 | #ifdef BGE_DEBUG |
4099 | bge_debug_info(sc); |
4100 | #endif |
4101 | } |
4102 | |
4103 | /* |
4104 | * Stop all chip I/O so that the kernel's probe routines don't |
4105 | * get confused by errant DMAs when rebooting. |
4106 | */ |
4107 | static int |
4108 | bge_detach(device_t self, int flags __unused) |
4109 | { |
4110 | struct bge_softc *sc = device_private(self); |
4111 | struct ifnet *ifp = &sc->ethercom.ec_if; |
4112 | int s; |
4113 | |
4114 | s = splnet(); |
4115 | /* Stop the interface. Callouts are stopped in it. */ |
4116 | bge_stop(ifp, 1); |
4117 | splx(s); |
4118 | |
4119 | mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); |
4120 | |
4121 | /* Delete all remaining media. */ |
4122 | ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY); |
4123 | |
4124 | ether_ifdetach(ifp); |
4125 | if_detach(ifp); |
4126 | |
4127 | bge_release_resources(sc); |
4128 | |
4129 | return 0; |
4130 | } |
4131 | |
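/*
 * Release the interrupt, DMA and register-mapping resources allocated
 * in bge_attach().
 */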
4132 | static void |
4133 | bge_release_resources(struct bge_softc *sc) |
4134 | { |
4135 | |
4136 | /* Disestablish the interrupt handler */ |
4137 | if (sc->bge_intrhand != NULL) { |
4138 | pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand); |
4139 | pci_intr_release(sc->sc_pc, sc->bge_pihp, 1); |
4140 | sc->bge_intrhand = NULL; |
4141 | } |
4142 | |
4143 | if (sc->bge_dmatag != NULL) { |
4144 | bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); |
4145 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); |
4146 | bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata, |
4147 | sizeof(struct bge_ring_data)); |
4148 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, |
4149 | sc->bge_ring_rseg); |
4150 | } |
4151 | |
4152 | /* Unmap the device registers */ |
4153 | if (sc->bge_bsize != 0) { |
4154 | bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); |
4155 | sc->bge_bsize = 0; |
4156 | } |
4157 | |
4158 | /* Unmap the APE registers */ |
4159 | if (sc->bge_apesize != 0) { |
4160 | bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, |
4161 | sc->bge_apesize); |
4162 | sc->bge_apesize = 0; |
4163 | } |
4164 | } |
4165 | |
4166 | static int |
4167 | bge_reset(struct bge_softc *sc) |
4168 | { |
4169 | uint32_t cachesize, command; |
4170 | uint32_t reset, mac_mode, mac_mode_mask; |
4171 | pcireg_t devctl, reg; |
4172 | int i, val; |
4173 | void (*write_op)(struct bge_softc *, int, int); |
4174 | |
4175 | /* Make mask for BGE_MAC_MODE register. */ |
4176 | mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; |
4177 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) |
4178 | mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; |
	/* Save the BGE_MAC_MODE bits covered by mac_mode_mask into mac_mode */
4180 | mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; |
4181 | |
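	/*
	 * Select how to post the reset write; which method is safe
	 * depends on the chip family and the bus type.
	 */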
4182 | if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && |
4183 | (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { |
4184 | if (sc->bge_flags & BGEF_PCIE) |
4185 | write_op = bge_writemem_direct; |
4186 | else |
4187 | write_op = bge_writemem_ind; |
4188 | } else |
4189 | write_op = bge_writereg_ind; |
4190 | |
4191 | /* 57XX step 4 */ |
4192 | /* Acquire the NVM lock */ |
4193 | if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 && |
4194 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 && |
4195 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) { |
4196 | CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); |
4197 | for (i = 0; i < 8000; i++) { |
4198 | if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & |
4199 | BGE_NVRAMSWARB_GNT1) |
4200 | break; |
4201 | DELAY(20); |
4202 | } |
4203 | if (i == 8000) { |
			printf("%s: NVRAM lock timed out!\n",
4205 | device_xname(sc->bge_dev)); |
4206 | } |
4207 | } |
4208 | |
4209 | /* Take APE lock when performing reset. */ |
4210 | bge_ape_lock(sc, BGE_APE_LOCK_GRC); |
4211 | |
4212 | /* 57XX step 3 */ |
4213 | /* Save some important PCI state. */ |
4214 | cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); |
4215 | /* 5718 reset step 3 */ |
4216 | command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); |
4217 | |
4218 | /* 5718 reset step 5, 57XX step 5b-5d */ |
4219 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, |
4220 | BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | |
4221 | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); |
4222 | |
4223 | /* XXX ???: Disable fastboot on controllers that support it. */ |
4224 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || |
4225 | BGE_IS_5755_PLUS(sc)) |
4226 | CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); |
4227 | |
4228 | /* 5718 reset step 2, 57XX step 6 */ |
4229 | /* |
4230 | * Write the magic number to SRAM at offset 0xB50. |
4231 | * When firmware finishes its initialization it will |
4232 | * write ~BGE_MAGIC_NUMBER to the same location. |
4233 | */ |
4234 | bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); |
4235 | |
4236 | /* 5718 reset step 6, 57XX step 7 */ |
4237 | reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; |
4238 | /* |
4239 | * XXX: from FreeBSD/Linux; no documentation |
4240 | */ |
4241 | if (sc->bge_flags & BGEF_PCIE) { |
4242 | if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) && |
4243 | !BGE_IS_57765_PLUS(sc) && |
4244 | (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) == |
4245 | (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) { |
4246 | /* PCI Express 1.0 system */ |
4247 | CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG, |
4248 | BGE_PHY_PCIE_SCRAM_MODE); |
4249 | } |
4250 | if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { |
4251 | /* |
4252 | * Prevent PCI Express link training |
4253 | * during global reset. |
4254 | */ |
4255 | CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); |
4256 | reset |= (1 << 29); |
4257 | } |
4258 | } |
4259 | |
4260 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
4261 | i = CSR_READ_4(sc, BGE_VCPU_STATUS); |
4262 | CSR_WRITE_4(sc, BGE_VCPU_STATUS, |
4263 | i | BGE_VCPU_STATUS_DRV_RESET); |
4264 | i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); |
4265 | CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, |
4266 | i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); |
4267 | } |
4268 | |
4269 | /* |
4270 | * Set GPHY Power Down Override to leave GPHY |
4271 | * powered up in D0 uninitialized. |
4272 | */ |
4273 | if (BGE_IS_5705_PLUS(sc) && |
4274 | (sc->bge_flags & BGEF_CPMU_PRESENT) == 0) |
4275 | reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; |
4276 | |
4277 | /* Issue global reset */ |
4278 | write_op(sc, BGE_MISC_CFG, reset); |
4279 | |
4280 | /* 5718 reset step 7, 57XX step 8 */ |
4281 | if (sc->bge_flags & BGEF_PCIE) |
4282 | delay(100*1000); /* too big */ |
4283 | else |
4284 | delay(1000); |
4285 | |
4286 | if (sc->bge_flags & BGEF_PCIE) { |
4287 | if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { |
4288 | DELAY(500000); |
4289 | /* XXX: Magic Numbers */ |
4290 | reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
4291 | BGE_PCI_UNKNOWN0); |
4292 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, |
4293 | BGE_PCI_UNKNOWN0, |
4294 | reg | (1 << 15)); |
4295 | } |
4296 | devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
4297 | sc->bge_pciecap + PCIE_DCSR); |
4298 | /* Clear enable no snoop and disable relaxed ordering. */ |
4299 | devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD | |
4300 | PCIE_DCSR_ENA_NO_SNOOP); |
4301 | |
4302 | /* Set PCIE max payload size to 128 for older PCIe devices */ |
4303 | if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0) |
4304 | devctl &= ~(0x00e0); |
4305 | /* Clear device status register. Write 1b to clear */ |
4306 | devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED |
4307 | | PCIE_DCSR_NFED | PCIE_DCSR_CED; |
4308 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, |
4309 | sc->bge_pciecap + PCIE_DCSR, devctl); |
4310 | bge_set_max_readrq(sc); |
4311 | } |
4312 | |
4313 | /* From Linux: dummy read to flush PCI posted writes */ |
4314 | reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); |
4315 | |
4316 | /* |
4317 | * Reset some of the PCI state that got zapped by reset |
4318 | * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be |
4319 | * set, too. |
4320 | */ |
4321 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, |
4322 | BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | |
4323 | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); |
4324 | val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; |
4325 | if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && |
4326 | (sc->bge_flags & BGEF_PCIX) != 0) |
4327 | val |= BGE_PCISTATE_RETRY_SAME_DMA; |
4328 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) |
4329 | val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | |
4330 | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | |
4331 | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; |
4332 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val); |
4333 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); |
4334 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); |
4335 | |
4336 | /* 57xx step 11: disable PCI-X Relaxed Ordering. */ |
4337 | if (sc->bge_flags & BGEF_PCIX) { |
4338 | reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap |
4339 | + PCIX_CMD); |
4340 | /* Set max memory read byte count to 2K */ |
4341 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { |
4342 | reg &= ~PCIX_CMD_BYTECNT_MASK; |
4343 | reg |= PCIX_CMD_BCNT_2048; |
4344 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){ |
4345 | /* |
4346 | * For 5704, set max outstanding split transaction |
4347 | * field to 0 (0 means it supports 1 request) |
4348 | */ |
4349 | reg &= ~(PCIX_CMD_SPLTRANS_MASK |
4350 | | PCIX_CMD_BYTECNT_MASK); |
4351 | reg |= PCIX_CMD_BCNT_2048; |
4352 | } |
4353 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap |
4354 | + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER); |
4355 | } |
4356 | |
4357 | /* 5718 reset step 10, 57XX step 12 */ |
4358 | /* Enable memory arbiter. */ |
4359 | if (BGE_IS_5714_FAMILY(sc)) { |
4360 | val = CSR_READ_4(sc, BGE_MARB_MODE); |
4361 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); |
4362 | } else |
4363 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); |
4364 | |
4365 | /* XXX 5721, 5751 and 5752 */ |
4366 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) { |
4367 | /* Step 19: */ |
4368 | BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25); |
4369 | /* Step 20: */ |
4370 | BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT); |
4371 | } |
4372 | |
4373 | /* 5718 reset step 12, 57XX step 15 and 16 */ |
4374 | /* Fix up byte swapping */ |
4375 | CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); |
4376 | |
4377 | /* 5718 reset step 13, 57XX step 17 */ |
4378 | /* Poll until the firmware initialization is complete */ |
4379 | bge_poll_fw(sc); |
4380 | |
4381 | /* 57XX step 21 */ |
4382 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) { |
4383 | pcireg_t msidata; |
4384 | |
4385 | msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
4386 | BGE_PCI_MSI_DATA); |
4387 | msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16); |
4388 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA, |
4389 | msidata); |
4390 | } |
4391 | |
4392 | /* 57XX step 18 */ |
4393 | /* Write mac mode. */ |
4394 | val = CSR_READ_4(sc, BGE_MAC_MODE); |
	/* Restore the bits covered by mac_mode_mask from the saved mac_mode */
4396 | val = (val & ~mac_mode_mask) | mac_mode; |
4397 | CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); |
4398 | DELAY(40); |
4399 | |
4400 | bge_ape_unlock(sc, BGE_APE_LOCK_GRC); |
4401 | |
4402 | /* |
4403 | * The 5704 in TBI mode apparently needs some special |
4404 | * adjustment to insure the SERDES drive level is set |
4405 | * to 1.2V. |
4406 | */ |
4407 | if (sc->bge_flags & BGEF_FIBER_TBI && |
4408 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { |
4409 | uint32_t serdescfg; |
4410 | |
4411 | serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); |
4412 | serdescfg = (serdescfg & ~0xFFF) | 0x880; |
4413 | CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); |
4414 | } |
4415 | |
4416 | if (sc->bge_flags & BGEF_PCIE && |
4417 | !BGE_IS_57765_PLUS(sc) && |
4418 | sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && |
4419 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) { |
4420 | uint32_t v; |
4421 | |
4422 | /* Enable PCI Express bug fix */ |
4423 | v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG); |
4424 | CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG, |
4425 | v | BGE_TLP_DATA_FIFO_PROTECT); |
4426 | } |
4427 | |
4428 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
4429 | BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, |
4430 | CPMU_CLCK_ORIDE_MAC_ORIDE_EN); |
4431 | |
4432 | return 0; |
4433 | } |
4434 | |
4435 | /* |
4436 | * Frame reception handling. This is called if there's a frame |
4437 | * on the receive return list. |
4438 | * |
4439 | * Note: we have to be able to handle two possibilities here: |
4440 | * 1) the frame is from the jumbo receive ring |
4441 | * 2) the frame is from the standard receive ring |
4442 | */ |
4443 | |
4444 | static void |
4445 | bge_rxeof(struct bge_softc *sc) |
4446 | { |
4447 | struct ifnet *ifp; |
4448 | uint16_t rx_prod, rx_cons; |
4449 | int stdcnt = 0, jumbocnt = 0; |
4450 | bus_dmamap_t dmamap; |
4451 | bus_addr_t offset, toff; |
4452 | bus_size_t tlen; |
4453 | int tosync; |
4454 | |
4455 | rx_cons = sc->bge_rx_saved_considx; |
4456 | rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; |
4457 | |
4458 | /* Nothing to do */ |
4459 | if (rx_cons == rx_prod) |
4460 | return; |
4461 | |
4462 | ifp = &sc->ethercom.ec_if; |
4463 | |
4464 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4465 | offsetof(struct bge_ring_data, bge_status_block), |
4466 | sizeof (struct bge_status_block), |
4467 | BUS_DMASYNC_POSTREAD); |
4468 | |
4469 | offset = offsetof(struct bge_ring_data, bge_rx_return_ring); |
4470 | tosync = rx_prod - rx_cons; |
4471 | |
4472 | if (tosync != 0) |
4473 | rnd_add_uint32(&sc->rnd_source, tosync); |
4474 | |
4475 | toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); |
4476 | |
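	/*
	 * If the producer index has wrapped past the end of the return
	 * ring, tosync is negative: sync from rx_cons to the end of the
	 * ring here; the sync below then covers the ring from the start.
	 */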
4477 | if (tosync < 0) { |
4478 | tlen = (sc->bge_return_ring_cnt - rx_cons) * |
4479 | sizeof (struct bge_rx_bd); |
4480 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4481 | toff, tlen, BUS_DMASYNC_POSTREAD); |
4482 | tosync = -tosync; |
4483 | } |
4484 | |
4485 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4486 | offset, tosync * sizeof (struct bge_rx_bd), |
4487 | BUS_DMASYNC_POSTREAD); |
4488 | |
4489 | while (rx_cons != rx_prod) { |
4490 | struct bge_rx_bd *cur_rx; |
4491 | uint32_t rxidx; |
4492 | struct mbuf *m = NULL; |
4493 | |
4494 | cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; |
4495 | |
4496 | rxidx = cur_rx->bge_idx; |
4497 | BGE_INC(rx_cons, sc->bge_return_ring_cnt); |
4498 | |
4499 | if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { |
4500 | BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); |
4501 | m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; |
4502 | sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; |
4503 | jumbocnt++; |
4504 | bus_dmamap_sync(sc->bge_dmatag, |
4505 | sc->bge_cdata.bge_rx_jumbo_map, |
4506 | mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, |
4507 | BGE_JLEN, BUS_DMASYNC_POSTREAD); |
4508 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { |
4509 | ifp->if_ierrors++; |
4510 | bge_newbuf_jumbo(sc, sc->bge_jumbo, m); |
4511 | continue; |
4512 | } |
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL) == ENOBUFS) {
4515 | ifp->if_ierrors++; |
4516 | bge_newbuf_jumbo(sc, sc->bge_jumbo, m); |
4517 | continue; |
4518 | } |
4519 | } else { |
4520 | BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); |
4521 | m = sc->bge_cdata.bge_rx_std_chain[rxidx]; |
4522 | |
4523 | sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; |
4524 | stdcnt++; |
4525 | dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; |
4526 | sc->bge_cdata.bge_rx_std_map[rxidx] = 0; |
4527 | if (dmamap == NULL) { |
4528 | ifp->if_ierrors++; |
4529 | bge_newbuf_std(sc, sc->bge_std, m, dmamap); |
4530 | continue; |
4531 | } |
4532 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, |
4533 | dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
4534 | bus_dmamap_unload(sc->bge_dmatag, dmamap); |
4535 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { |
4536 | ifp->if_ierrors++; |
4537 | bge_newbuf_std(sc, sc->bge_std, m, dmamap); |
4538 | continue; |
4539 | } |
4540 | if (bge_newbuf_std(sc, sc->bge_std, |
4541 | NULL, dmamap) == ENOBUFS) { |
4542 | ifp->if_ierrors++; |
4543 | bge_newbuf_std(sc, sc->bge_std, m, dmamap); |
4544 | continue; |
4545 | } |
4546 | } |
4547 | |
4548 | ifp->if_ipackets++; |
4549 | #ifndef __NO_STRICT_ALIGNMENT |
4550 | /* |
4551 | * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, |
4552 | * the Rx buffer has the layer-2 header unaligned. |
4553 | * If our CPU requires alignment, re-align by copying. |
4554 | */ |
4555 | if (sc->bge_flags & BGEF_RX_ALIGNBUG) { |
4556 | memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, |
4557 | cur_rx->bge_len); |
4558 | m->m_data += ETHER_ALIGN; |
4559 | } |
4560 | #endif |
4561 | |
4562 | m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; |
4563 | m_set_rcvif(m, ifp); |
4564 | |
4565 | /* |
4566 | * Handle BPF listeners. Let the BPF user see the packet. |
4567 | */ |
4568 | bpf_mtap(ifp, m); |
4569 | |
4570 | bge_rxcsum(sc, cur_rx, m); |
4571 | |
4572 | /* |
4573 | * If we received a packet with a vlan tag, pass it |
4574 | * to vlan_input() instead of ether_input(). |
4575 | */ |
4576 | if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { |
4577 | VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); |
4578 | } |
4579 | |
4580 | if_percpuq_enqueue(ifp->if_percpuq, m); |
4581 | } |
4582 | |
4583 | sc->bge_rx_saved_considx = rx_cons; |
4584 | bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); |
4585 | if (stdcnt) |
4586 | bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); |
4587 | if (jumbocnt) |
4588 | bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); |
4589 | } |
4590 | |
4591 | static void |
4592 | bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) |
4593 | { |
4594 | |
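	/*
	 * 57765-plus parts flag checksum errors explicitly; older parts
	 * return the computed IP header checksum, which must be 0xffff
	 * for a good packet.
	 */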
4595 | if (BGE_IS_57765_PLUS(sc)) { |
4596 | if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { |
4597 | if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) |
4598 | m->m_pkthdr.csum_flags = M_CSUM_IPv4; |
4599 | if ((cur_rx->bge_error_flag & |
4600 | BGE_RXERRFLAG_IP_CSUM_NOK) != 0) |
4601 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; |
4602 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { |
4603 | m->m_pkthdr.csum_data = |
4604 | cur_rx->bge_tcp_udp_csum; |
4605 | m->m_pkthdr.csum_flags |= |
4606 | (M_CSUM_TCPv4|M_CSUM_UDPv4| |
4607 | M_CSUM_DATA); |
4608 | } |
4609 | } |
4610 | } else { |
4611 | if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) |
4612 | m->m_pkthdr.csum_flags = M_CSUM_IPv4; |
4613 | if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) |
4614 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; |
4615 | /* |
4616 | * Rx transport checksum-offload may also |
4617 | * have bugs with packets which, when transmitted, |
4618 | * were `runts' requiring padding. |
4619 | */ |
4620 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && |
4621 | (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ |
4622 | m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { |
4623 | m->m_pkthdr.csum_data = |
4624 | cur_rx->bge_tcp_udp_csum; |
4625 | m->m_pkthdr.csum_flags |= |
4626 | (M_CSUM_TCPv4|M_CSUM_UDPv4| |
4627 | M_CSUM_DATA); |
4628 | } |
4629 | } |
4630 | } |
4631 | |
4632 | static void |
4633 | bge_txeof(struct bge_softc *sc) |
4634 | { |
4635 | struct bge_tx_bd *cur_tx = NULL; |
4636 | struct ifnet *ifp; |
4637 | struct txdmamap_pool_entry *dma; |
4638 | bus_addr_t offset, toff; |
4639 | bus_size_t tlen; |
4640 | int tosync; |
4641 | struct mbuf *m; |
4642 | |
4643 | ifp = &sc->ethercom.ec_if; |
4644 | |
4645 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4646 | offsetof(struct bge_ring_data, bge_status_block), |
4647 | sizeof (struct bge_status_block), |
4648 | BUS_DMASYNC_POSTREAD); |
4649 | |
4650 | offset = offsetof(struct bge_ring_data, bge_tx_ring); |
4651 | tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - |
4652 | sc->bge_tx_saved_considx; |
4653 | |
4654 | if (tosync != 0) |
4655 | rnd_add_uint32(&sc->rnd_source, tosync); |
4656 | |
4657 | toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); |
4658 | |
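	/*
	 * A negative delta means the hardware producer has wrapped past
	 * the end of the TX ring: sync from the saved consumer index to
	 * the end of the ring here, and the wrapped head portion below.
	 */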
4659 | if (tosync < 0) { |
4660 | tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * |
4661 | sizeof (struct bge_tx_bd); |
4662 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4663 | toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
4664 | tosync = -tosync; |
4665 | } |
4666 | |
4667 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4668 | offset, tosync * sizeof (struct bge_tx_bd), |
4669 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
4670 | |
4671 | /* |
4672 | * Go through our tx ring and free mbufs for those |
4673 | * frames that have been sent. |
4674 | */ |
4675 | while (sc->bge_tx_saved_considx != |
4676 | sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { |
		uint32_t idx = sc->bge_tx_saved_considx;
4680 | cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; |
4681 | if (cur_tx->bge_flags & BGE_TXBDFLAG_END) |
4682 | ifp->if_opackets++; |
4683 | m = sc->bge_cdata.bge_tx_chain[idx]; |
4684 | if (m != NULL) { |
4685 | sc->bge_cdata.bge_tx_chain[idx] = NULL; |
4686 | dma = sc->txdma[idx]; |
4687 | bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, |
4688 | dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
4689 | bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); |
4690 | SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); |
4691 | sc->txdma[idx] = NULL; |
4692 | |
4693 | m_freem(m); |
4694 | } |
4695 | sc->bge_txcnt--; |
4696 | BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); |
4697 | ifp->if_timer = 0; |
4698 | } |
4699 | |
4700 | if (cur_tx != NULL) |
4701 | ifp->if_flags &= ~IFF_OACTIVE; |
4702 | } |
4703 | |
4704 | static int |
4705 | bge_intr(void *xsc) |
4706 | { |
4707 | struct bge_softc *sc; |
4708 | struct ifnet *ifp; |
4709 | uint32_t pcistate, statusword, statustag; |
4710 | uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE; |
4711 | |
4712 | sc = xsc; |
4713 | ifp = &sc->ethercom.ec_if; |
4714 | |
4715 | /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */ |
4716 | if (BGE_IS_5717_PLUS(sc)) |
4717 | intrmask = 0; |
4718 | |
	/*
	 * It is possible for the interrupt to arrive before
	 * the status block has been updated.  Reading the PCI State
	 * register confirms whether the interrupt is ours and flushes
	 * the status block.
	 */
4724 | pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE); |
4725 | |
4726 | /* read status word from status block */ |
4727 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4728 | offsetof(struct bge_ring_data, bge_status_block), |
4729 | sizeof (struct bge_status_block), |
4730 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
4731 | statusword = sc->bge_rdata->bge_status_block.bge_status; |
4732 | statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; |
4733 | |
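	/*
	 * With tagged status the chip advances the tag each time it DMAs
	 * a fresh status block, so an unchanged tag (cross-checked against
	 * the PCI state register) indicates the interrupt was not ours;
	 * without tags the UPDATED flag plays that role.
	 */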
4734 | if (sc->bge_flags & BGEF_TAGGED_STATUS) { |
4735 | if (sc->bge_lasttag == statustag && |
4736 | (~pcistate & intrmask)) { |
4737 | return (0); |
4738 | } |
4739 | sc->bge_lasttag = statustag; |
4740 | } else { |
4741 | if (!(statusword & BGE_STATFLAG_UPDATED) && |
4742 | !(~pcistate & intrmask)) { |
4743 | return (0); |
4744 | } |
4745 | statustag = 0; |
4746 | } |
4747 | /* Ack interrupt and stop others from occurring. */ |
4748 | bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); |
4749 | BGE_EVCNT_INCR(sc->bge_ev_intr); |
4750 | |
4751 | /* clear status word */ |
4752 | sc->bge_rdata->bge_status_block.bge_status = 0; |
4753 | |
4754 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
4755 | offsetof(struct bge_ring_data, bge_status_block), |
4756 | sizeof (struct bge_status_block), |
4757 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
4758 | |
4759 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || |
4760 | statusword & BGE_STATFLAG_LINKSTATE_CHANGED || |
4761 | BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) |
4762 | bge_link_upd(sc); |
4763 | |
4764 | if (ifp->if_flags & IFF_RUNNING) { |
4765 | /* Check RX return ring producer/consumer */ |
4766 | bge_rxeof(sc); |
4767 | |
4768 | /* Check TX ring producer/consumer */ |
4769 | bge_txeof(sc); |
4770 | } |
4771 | |
4772 | if (sc->bge_pending_rxintr_change) { |
4773 | uint32_t rx_ticks = sc->bge_rx_coal_ticks; |
4774 | uint32_t rx_bds = sc->bge_rx_max_coal_bds; |
4775 | |
4776 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); |
4777 | DELAY(10); |
4778 | (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); |
4779 | |
4780 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); |
4781 | DELAY(10); |
4782 | (void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); |
4783 | |
4784 | sc->bge_pending_rxintr_change = 0; |
4785 | } |
4786 | bge_handle_events(sc); |
4787 | |
4788 | /* Re-enable interrupts. */ |
4789 | bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag); |
4790 | |
4791 | if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) |
4792 | bge_start(ifp); |
4793 | |
4794 | return 1; |
4795 | } |
4796 | |
4797 | static void |
4798 | bge_asf_driver_up(struct bge_softc *sc) |
4799 | { |
4800 | if (sc->bge_asf_mode & ASF_STACKUP) { |
		/* Send ASF heartbeat approx. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
4804 | else { |
4805 | sc->bge_asf_count = 2; |
4806 | |
4807 | bge_wait_for_event_ack(sc); |
4808 | |
4809 | bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, |
4810 | BGE_FW_CMD_DRV_ALIVE3); |
4811 | bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); |
4812 | bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, |
4813 | BGE_FW_HB_TIMEOUT_SEC); |
4814 | CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, |
4815 | CSR_READ_4(sc, BGE_RX_CPU_EVENT) | |
4816 | BGE_RX_CPU_DRV_EVENT); |
4817 | } |
4818 | } |
4819 | } |
4820 | |
4821 | static void |
4822 | bge_tick(void *xsc) |
4823 | { |
4824 | struct bge_softc *sc = xsc; |
4825 | struct mii_data *mii = &sc->bge_mii; |
4826 | int s; |
4827 | |
4828 | s = splnet(); |
4829 | |
4830 | if (BGE_IS_5705_PLUS(sc)) |
4831 | bge_stats_update_regs(sc); |
4832 | else |
4833 | bge_stats_update(sc); |
4834 | |
4835 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
		/*
		 * Since auto-polling can't be used in TBI mode, we poll
		 * link status manually: register a pending link event
		 * and trigger an interrupt.
		 */
4841 | BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); |
4842 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); |
4843 | } else { |
		/*
		 * Do not touch the PHY if we have link up. This could
		 * break IPMI/ASF mode or produce extra input errors
		 * (extra input errors were reported for bcm5701 & bcm5704).
		 */
4849 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)) |
4850 | mii_tick(mii); |
4851 | } |
4852 | |
4853 | bge_asf_driver_up(sc); |
4854 | |
4855 | if (!sc->bge_detaching) |
4856 | callout_reset(&sc->bge_timeout, hz, bge_tick, sc); |
4857 | |
4858 | splx(s); |
4859 | } |
4860 | |
4861 | static void |
4862 | bge_stats_update_regs(struct bge_softc *sc) |
4863 | { |
4864 | struct ifnet *ifp = &sc->ethercom.ec_if; |
4865 | |
4866 | ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + |
4867 | offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); |
4868 | |
4869 | ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); |
4870 | ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); |
4871 | ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); |
4872 | } |
4873 | |
4874 | static void |
4875 | bge_stats_update(struct bge_softc *sc) |
4876 | { |
4877 | struct ifnet *ifp = &sc->ethercom.ec_if; |
4878 | bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; |
4879 | |
4880 | #define READ_STAT(sc, stats, stat) \ |
4881 | CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) |
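	/*
	 * The chip's collision counters are cumulative, so subtracting
	 * the current if_collisions turns the '+=' below into an
	 * absolute assignment of the hardware totals.
	 */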
4882 | |
4883 | ifp->if_collisions += |
4884 | (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + |
4885 | READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + |
4886 | READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + |
4887 | READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - |
4888 | ifp->if_collisions; |
4889 | |
4890 | BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, |
4891 | READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); |
4892 | BGE_EVCNT_UPD(sc->bge_ev_tx_xon, |
4893 | READ_STAT(sc, stats, outXonSent.bge_addr_lo)); |
4894 | BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, |
4895 | READ_STAT(sc, stats, |
4896 | xoffPauseFramesReceived.bge_addr_lo)); |
4897 | BGE_EVCNT_UPD(sc->bge_ev_rx_xon, |
4898 | READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); |
4899 | BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, |
4900 | READ_STAT(sc, stats, |
4901 | macControlFramesReceived.bge_addr_lo)); |
4902 | BGE_EVCNT_UPD(sc->bge_ev_xoffentered, |
4903 | READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); |
4904 | |
4905 | #undef READ_STAT |
4906 | |
4907 | #ifdef notdef |
4908 | ifp->if_collisions += |
4909 | (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + |
4910 | sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + |
4911 | sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + |
4912 | sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - |
4913 | ifp->if_collisions; |
4914 | #endif |
4915 | } |
4916 | |
4917 | /* |
4918 | * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. |
4919 | * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, |
4920 | * but when such padded frames employ the bge IP/TCP checksum offload, |
4921 | * the hardware checksum assist gives incorrect results (possibly |
4922 | * from incorporating its own padding into the UDP/TCP checksum; who knows). |
4923 | * If we pad such runts with zeros, the onboard checksum comes out correct. |
4924 | */ |
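/*
 * For example, a minimal 42-byte UDP/IPv4 frame (14-byte Ethernet +
 * 20-byte IP + 8-byte UDP headers) falls 18 bytes short of
 * ETHER_MIN_NOPAD (60), so 18 zero bytes are appended here before the
 * checksum offload is used.
 */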
4925 | static inline int |
4926 | bge_cksum_pad(struct mbuf *pkt) |
4927 | { |
4928 | struct mbuf *last = NULL; |
4929 | int padlen; |
4930 | |
4931 | padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; |
4932 | |
	/* If there's only the packet-header mbuf and we can pad there, use it. */
4934 | if (pkt->m_pkthdr.len == pkt->m_len && |
4935 | M_TRAILINGSPACE(pkt) >= padlen) { |
4936 | last = pkt; |
4937 | } else { |
4938 | /* |
4939 | * Walk packet chain to find last mbuf. We will either |
4940 | * pad there, or append a new mbuf and pad it |
4941 | * (thus perhaps avoiding the bcm5700 dma-min bug). |
4942 | */ |
		for (last = pkt; last->m_next != NULL; last = last->m_next)
			continue;
4946 | |
4947 | /* `last' now points to last in chain. */ |
4948 | if (M_TRAILINGSPACE(last) < padlen) { |
4949 | /* Allocate new empty mbuf, pad it. Compact later. */ |
4950 | struct mbuf *n; |
4951 | MGET(n, M_DONTWAIT, MT_DATA); |
4952 | if (n == NULL) |
4953 | return ENOBUFS; |
4954 | n->m_len = 0; |
4955 | last->m_next = n; |
4956 | last = n; |
4957 | } |
4958 | } |
4959 | |
4960 | KDASSERT(!M_READONLY(last)); |
4961 | KDASSERT(M_TRAILINGSPACE(last) >= padlen); |
4962 | |
4963 | /* Now zero the pad area, to avoid the bge cksum-assist bug */ |
4964 | memset(mtod(last, char *) + last->m_len, 0, padlen); |
4965 | last->m_len += padlen; |
4966 | pkt->m_pkthdr.len += padlen; |
4967 | return 0; |
4968 | } |
4969 | |
4970 | /* |
 * Compact outbound packets to work around a hardware bug with
 * DMA segments shorter than 8 bytes.
4972 | */ |
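/*
 * For example, in a chain of 14-, 6- and 1400-byte mbufs, the 6-byte
 * runt is merged into the trailing space of the 14-byte mbuf (or pulled
 * up from its successor) so that no DMA segment ends up shorter than
 * 8 bytes.
 */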
4973 | static inline int |
4974 | bge_compact_dma_runt(struct mbuf *pkt) |
4975 | { |
4976 | struct mbuf *m, *prev; |
4977 | int totlen; |
4978 | |
4979 | prev = NULL; |
4980 | totlen = 0; |
4981 | |
	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;
4985 | |
4986 | totlen += mlen; |
4987 | if (mlen == 0) |
4988 | continue; |
4989 | if (mlen >= 8) |
4990 | continue; |
4991 | |
4992 | /* If we get here, mbuf data is too small for DMA engine. |
4993 | * Try to fix by shuffling data to prev or next in chain. |
4994 | * If that fails, do a compacting deep-copy of the whole chain. |
4995 | */ |
4996 | |
		/* Internal frag. If it fits in prev, copy it there. */
4998 | if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { |
4999 | memcpy(prev->m_data + prev->m_len, m->m_data, mlen); |
5000 | prev->m_len += mlen; |
5001 | m->m_len = 0; |
5002 | /* XXX stitch chain */ |
5003 | prev->m_next = m_free(m); |
5004 | m = prev; |
5005 | continue; |
		} else if (m->m_next != NULL &&
		    M_TRAILINGSPACE(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and next has enough data; pull up. */
5011 | |
5012 | memcpy(m->m_data + m->m_len, m->m_next->m_data, |
5013 | shortfall); |
5014 | m->m_len += shortfall; |
5015 | m->m_next->m_len -= shortfall; |
5016 | m->m_next->m_data += shortfall; |
		} else if (m->m_next == NULL || 1) {
			/*
			 * Got a runt at the very end of the packet.
			 * Borrow data from the tail of the preceding mbuf
			 * and update its length in-place.  (The original
			 * data is still valid, so we can do this even if
			 * prev is not writable.)
			 */
5024 | |
5025 | /* if we'd make prev a runt, just move all of its data. */ |
5026 | KASSERT(prev != NULL /*, ("runt but null PREV")*/); |
5027 | KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); |
5028 | |
5029 | if ((prev->m_len - shortfall) < 8) |
5030 | shortfall = prev->m_len; |
5031 | |
5032 | #ifdef notyet /* just do the safe slow thing for now */ |
5033 | if (!M_READONLY(m)) { |
				if (M_LEADINGSPACE(m) < shortfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					    m->m_pktdat : m->m_dat;
5038 | memmove(m_dat, mtod(m, void*), m->m_len); |
5039 | m->m_data = m_dat; |
5040 | } |
5041 | } else |
5042 | #endif /* just do the safe slow thing */ |
5043 | { |
				struct mbuf *n = NULL;
5045 | int newprevlen = prev->m_len - shortfall; |
5046 | |
5047 | MGET(n, M_NOWAIT, MT_DATA); |
5048 | if (n == NULL) |
5049 | return ENOBUFS; |
				KASSERT(m->m_len + shortfall < MLEN
				    /*, ("runt %d +prev %d too big\n",
				    m->m_len, shortfall)*/);
5053 | |
5054 | /* first copy the data we're stealing from prev */ |
5055 | memcpy(n->m_data, prev->m_data + newprevlen, |
5056 | shortfall); |
5057 | |
5058 | /* update prev->m_len accordingly */ |
5059 | prev->m_len -= shortfall; |
5060 | |
5061 | /* copy data from runt m */ |
5062 | memcpy(n->m_data + shortfall, m->m_data, |
5063 | m->m_len); |
5064 | |
5065 | /* n holds what we stole from prev, plus m */ |
5066 | n->m_len = shortfall + m->m_len; |
5067 | |
5068 | /* stitch n into chain and free m */ |
5069 | n->m_next = m->m_next; |
5070 | prev->m_next = n; |
5071 | /* KASSERT(m->m_next == NULL); */ |
5072 | m->m_next = NULL; |
5073 | m_free(m); |
5074 | m = n; /* for continuing loop */ |
5075 | } |
5076 | } |
5077 | } |
5078 | return 0; |
5079 | } |
5080 | |
5081 | /* |
5082 | * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data |
5083 | * pointers to descriptors. |
5084 | */ |
5085 | static int |
5086 | bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx) |
5087 | { |
5088 | struct bge_tx_bd *f = NULL; |
5089 | uint32_t frag, cur; |
5090 | uint16_t csum_flags = 0; |
5091 | uint16_t txbd_tso_flags = 0; |
5092 | struct txdmamap_pool_entry *dma; |
5093 | bus_dmamap_t dmamap; |
5094 | int i = 0; |
5095 | struct m_tag *mtag; |
5096 | int use_tso, maxsegsize, error; |
5097 | |
5098 | cur = frag = *txidx; |
5099 | |
5100 | if (m_head->m_pkthdr.csum_flags) { |
5101 | if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) |
5102 | csum_flags |= BGE_TXBDFLAG_IP_CSUM; |
5103 | if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) |
5104 | csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; |
5105 | } |
5106 | |
5107 | /* |
5108 | * If we were asked to do an outboard checksum, and the NIC |
5109 | * has the bug where it sometimes adds in the Ethernet padding, |
5110 | * explicitly pad with zeros so the cksum will be correct either way. |
5111 | * (For now, do this for all chip versions, until newer |
5112 | * are confirmed to not require the workaround.) |
5113 | */ |
5114 | if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || |
5115 | #ifdef notyet |
5116 | (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || |
5117 | #endif |
5118 | m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) |
5119 | goto check_dma_bug; |
5120 | |
5121 | if (bge_cksum_pad(m_head) != 0) |
5122 | return ENOBUFS; |
5123 | |
5124 | check_dma_bug: |
	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
5126 | goto doit; |
5127 | |
5128 | /* |
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * fewer than eight bytes.  If we encounter a teeny mbuf
	 * at the end of a chain, we can pad.  Otherwise, copy.
5132 | */ |
5133 | if (bge_compact_dma_runt(m_head) != 0) |
5134 | return ENOBUFS; |
5135 | |
5136 | doit: |
5137 | dma = SLIST_FIRST(&sc->txdma_list); |
5138 | if (dma == NULL) |
5139 | return ENOBUFS; |
5140 | dmamap = dma->dmamap; |
5141 | |
5142 | /* |
5143 | * Set up any necessary TSO state before we start packing... |
5144 | */ |
5145 | use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; |
5146 | if (!use_tso) { |
5147 | maxsegsize = 0; |
5148 | } else { /* TSO setup */ |
5149 | unsigned mss; |
5150 | struct ether_header *eh; |
5151 | unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; |
		struct mbuf *m0 = m_head;
5153 | struct ip *ip; |
5154 | struct tcphdr *th; |
5155 | int iphl, hlen; |
5156 | |
5157 | /* |
5158 | * XXX It would be nice if the mbuf pkthdr had offset |
5159 | * fields for the protocol headers. |
5160 | */ |
5161 | |
5162 | eh = mtod(m0, struct ether_header *); |
5163 | switch (htons(eh->ether_type)) { |
5164 | case ETHERTYPE_IP: |
5165 | offset = ETHER_HDR_LEN; |
5166 | break; |
5167 | |
5168 | case ETHERTYPE_VLAN: |
5169 | offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
5170 | break; |
5171 | |
5172 | default: |
5173 | /* |
5174 | * Don't support this protocol or encapsulation. |
5175 | */ |
5176 | return ENOBUFS; |
5177 | } |
5178 | |
5179 | /* |
5180 | * TCP/IP headers are in the first mbuf; we can do |
5181 | * this the easy way. |
5182 | */ |
5183 | iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); |
5184 | hlen = iphl + offset; |
5185 | if (__predict_false(m0->m_len < |
5186 | (hlen + sizeof(struct tcphdr)))) { |
5187 | |
			aprint_debug_dev(sc->bge_dev,
			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zu, "
			    "not handled yet\n",
			    m0->m_len, hlen + sizeof(struct tcphdr));
5192 | #ifdef NOTYET |
5193 | /* |
5194 | * XXX jonathan@NetBSD.org: untested. |
5195 | * how to force this branch to be taken? |
5196 | */ |
5197 | BGE_EVCNT_INCR(sc->bge_ev_txtsopain); |
5198 | |
5199 | m_copydata(m0, offset, sizeof(ip), &ip); |
5200 | m_copydata(m0, hlen, sizeof(th), &th); |
5201 | |
5202 | ip.ip_len = 0; |
5203 | |
5204 | m_copyback(m0, hlen + offsetof(struct ip, ip_len), |
5205 | sizeof(ip.ip_len), &ip.ip_len); |
5206 | |
5207 | th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, |
5208 | ip.ip_dst.s_addr, htons(IPPROTO_TCP)); |
5209 | |
5210 | m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), |
5211 | sizeof(th.th_sum), &th.th_sum); |
5212 | |
5213 | hlen += th.th_off << 2; |
5214 | iptcp_opt_words = hlen; |
5215 | #else |
5216 | /* |
5217 | * if_wm "hard" case not yet supported, can we not |
5218 | * mandate it out of existence? |
5219 | */ |
			(void)ip; (void)th; (void)ip_tcp_hlen;
5221 | |
5222 | return ENOBUFS; |
5223 | #endif |
5224 | } else { |
5225 | ip = (struct ip *) (mtod(m0, char *) + offset); |
5226 | th = (struct tcphdr *) (mtod(m0, char *) + hlen); |
5227 | ip_tcp_hlen = iphl + (th->th_off << 2); |
5228 | |
5229 | /* Total IP/TCP options, in 32-bit words */ |
5230 | iptcp_opt_words = (ip_tcp_hlen |
5231 | - sizeof(struct tcphdr) |
5232 | - sizeof(struct ip)) >> 2; |
5233 | } |
5234 | if (BGE_IS_575X_PLUS(sc)) { |
5235 | th->th_sum = 0; |
5236 | csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); |
5237 | } else { |
5238 | /* |
5239 | * XXX jonathan@NetBSD.org: 5705 untested. |
5240 | * Requires TSO firmware patch for 5701/5703/5704. |
5241 | */ |
5242 | th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, |
5243 | ip->ip_dst.s_addr, htons(IPPROTO_TCP)); |
5244 | } |
5245 | |
5246 | mss = m_head->m_pkthdr.segsz; |
5247 | txbd_tso_flags |= |
5248 | BGE_TXBDFLAG_CPU_PRE_DMA | |
5249 | BGE_TXBDFLAG_CPU_POST_DMA; |
5250 | |
5251 | /* |
5252 | * Our NIC TSO-assist assumes TSO has standard, optionless |
5253 | * IPv4 and TCP headers, which total 40 bytes. By default, |
5254 | * the NIC copies 40 bytes of IP/TCP header from the |
5255 | * supplied header into the IP/TCP header portion of |
5256 | * each post-TSO-segment. If the supplied packet has IP or |
5257 | * TCP options, we need to tell the NIC to copy those extra |
5258 | * bytes into each post-TSO header, in addition to the normal |
5259 | * 40-byte IP/TCP header (and to leave space accordingly). |
		 * Unfortunately, the descriptor encoding of the option
		 * length varies across ASIC families.
5262 | */ |
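		/*
		 * E.g. 12 bytes of TCP options give iptcp_opt_words = 3;
		 * 5705-plus parts carry that count in the mss word
		 * (shifted left by 11 below), older parts in the
		 * descriptor flags (shifted left by 12).
		 */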
5263 | tcp_seg_flags = 0; |
5264 | if (iptcp_opt_words) { |
			if (BGE_IS_5705_PLUS(sc)) {
				tcp_seg_flags = iptcp_opt_words << 11;
			} else {
				txbd_tso_flags |= iptcp_opt_words << 12;
			}
5272 | } |
5273 | maxsegsize = mss | tcp_seg_flags; |
5274 | ip->ip_len = htons(mss + ip_tcp_hlen); |
5275 | |
5276 | } /* TSO setup */ |
5277 | |
5278 | /* |
5279 | * Start packing the mbufs in this chain into |
5280 | * the fragment pointers. Stop when we run out |
5281 | * of fragments or hit the end of the mbuf chain. |
5282 | */ |
5283 | error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, |
5284 | BUS_DMA_NOWAIT); |
5285 | if (error) |
5286 | return ENOBUFS; |
5287 | /* |
5288 | * Sanity check: avoid coming within 16 descriptors |
5289 | * of the end of the ring. |
5290 | */ |
5291 | if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { |
		BGE_TSO_PRINTF(("%s: dmamap_load_mbuf too close to ring wrap\n",
		    device_xname(sc->bge_dev)));
5295 | goto fail_unload; |
5296 | } |
5297 | |
5298 | mtag = sc->ethercom.ec_nvlans ? |
5299 | m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; |

	/* Iterate over DMA-map fragments. */
5303 | for (i = 0; i < dmamap->dm_nsegs; i++) { |
5304 | f = &sc->bge_rdata->bge_tx_ring[frag]; |
5305 | if (sc->bge_cdata.bge_tx_chain[frag] != NULL) |
5306 | break; |
5307 | |
5308 | BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); |
5309 | f->bge_len = dmamap->dm_segs[i].ds_len; |
5310 | |
5311 | /* |
5312 | * For 5751 and follow-ons, for TSO we must turn |
5313 | * off checksum-assist flag in the tx-descr, and |
5314 | * supply the ASIC-revision-specific encoding |
5315 | * of TSO flags and segsize. |
5316 | */ |
5317 | if (use_tso) { |
5318 | if (BGE_IS_575X_PLUS(sc) || i == 0) { |
5319 | f->bge_rsvd = maxsegsize; |
5320 | f->bge_flags = csum_flags | txbd_tso_flags; |
5321 | } else { |
5322 | f->bge_rsvd = 0; |
5323 | f->bge_flags = |
5324 | (csum_flags | txbd_tso_flags) & 0x0fff; |
5325 | } |
5326 | } else { |
5327 | f->bge_rsvd = 0; |
5328 | f->bge_flags = csum_flags; |
5329 | } |
5330 | |
5331 | if (mtag != NULL) { |
5332 | f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; |
5333 | f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); |
5334 | } else { |
5335 | f->bge_vlan_tag = 0; |
5336 | } |
5337 | cur = frag; |
5338 | BGE_INC(frag, BGE_TX_RING_CNT); |
5339 | } |
5340 | |
5341 | if (i < dmamap->dm_nsegs) { |
		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
		    device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
5344 | goto fail_unload; |
5345 | } |
5346 | |
5347 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, |
5348 | BUS_DMASYNC_PREWRITE); |
5349 | |
5350 | if (frag == sc->bge_tx_saved_considx) { |
		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
		    device_xname(sc->bge_dev), frag,
		    sc->bge_tx_saved_considx));
5353 | |
5354 | goto fail_unload; |
5355 | } |
5356 | |
5357 | sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; |
5358 | sc->bge_cdata.bge_tx_chain[cur] = m_head; |
5359 | SLIST_REMOVE_HEAD(&sc->txdma_list, link); |
5360 | sc->txdma[cur] = dma; |
5361 | sc->bge_txcnt += dmamap->dm_nsegs; |
5362 | |
5363 | *txidx = frag; |
5364 | |
5365 | return 0; |
5366 | |
5367 | fail_unload: |
5368 | bus_dmamap_unload(sc->bge_dmatag, dmamap); |
5369 | |
5370 | return ENOBUFS; |
5371 | } |
5372 | |
5373 | /* |
5374 | * Main transmit routine. To avoid having to do mbuf copies, we put pointers |
5375 | * to the mbuf data regions directly in the transmit descriptors. |
5376 | */ |
5377 | static void |
5378 | bge_start(struct ifnet *ifp) |
5379 | { |
5380 | struct bge_softc *sc; |
5381 | struct mbuf *m_head = NULL; |
5382 | uint32_t prodidx; |
5383 | int pkts = 0; |
5384 | |
5385 | sc = ifp->if_softc; |
5386 | |
5387 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) |
5388 | return; |
5389 | |
5390 | prodidx = sc->bge_tx_prodidx; |
5391 | |
5392 | while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { |
5393 | IFQ_POLL(&ifp->if_snd, m_head); |
5394 | if (m_head == NULL) |
5395 | break; |
5396 | |
5397 | #if 0 |
5398 | /* |
5399 | * XXX |
5400 | * safety overkill. If this is a fragmented packet chain |
5401 | * with delayed TCP/UDP checksums, then only encapsulate |
5402 | * it if we have enough descriptors to handle the entire |
5403 | * chain at once. |
5404 | * (paranoia -- may not actually be needed) |
5405 | */ |
5406 | if (m_head->m_flags & M_FIRSTFRAG && |
5407 | m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { |
5408 | if ((BGE_TX_RING_CNT - sc->bge_txcnt) < |
5409 | M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { |
5410 | ifp->if_flags |= IFF_OACTIVE; |
5411 | break; |
5412 | } |
5413 | } |
5414 | #endif |
5415 | |
5416 | /* |
5417 | * Pack the data into the transmit ring. If we |
5418 | * don't have room, set the OACTIVE flag and wait |
5419 | * for the NIC to drain the ring. |
5420 | */ |
5421 | if (bge_encap(sc, m_head, &prodidx)) { |
5422 | ifp->if_flags |= IFF_OACTIVE; |
5423 | break; |
5424 | } |
5425 | |
5426 | /* now we are committed to transmit the packet */ |
5427 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
5428 | pkts++; |
5429 | |
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
5434 | bpf_mtap(ifp, m_head); |
5435 | } |
5436 | if (pkts == 0) |
5437 | return; |
5438 | |
5439 | /* Transmit */ |
5440 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); |
5441 | /* 5700 b2 errata */ |
5442 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) |
5443 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); |
5444 | |
5445 | sc->bge_tx_prodidx = prodidx; |
5446 | |
5447 | /* |
5448 | * Set a timeout in case the chip goes out to lunch. |
5449 | */ |
5450 | ifp->if_timer = 5; |
5451 | } |
5452 | |
5453 | static int |
5454 | bge_init(struct ifnet *ifp) |
5455 | { |
5456 | struct bge_softc *sc = ifp->if_softc; |
5457 | const uint16_t *m; |
5458 | uint32_t mode, reg; |
5459 | int s, error = 0; |
5460 | |
5461 | s = splnet(); |
5462 | |
5463 | ifp = &sc->ethercom.ec_if; |
5464 | |
5465 | /* Cancel pending I/O and flush buffers. */ |
5466 | bge_stop(ifp, 0); |
5467 | |
5468 | bge_stop_fw(sc); |
5469 | bge_sig_pre_reset(sc, BGE_RESET_START); |
5470 | bge_reset(sc); |
5471 | bge_sig_legacy(sc, BGE_RESET_START); |
5472 | |
5473 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { |
5474 | reg = CSR_READ_4(sc, BGE_CPMU_CTRL); |
5475 | reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE | |
5476 | BGE_CPMU_CTRL_LINK_IDLE_MODE); |
5477 | CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); |
5478 | |
5479 | reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); |
5480 | reg &= ~BGE_CPMU_LSPD_10MB_CLK; |
5481 | reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; |
5482 | CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); |
5483 | |
5484 | reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD); |
5485 | reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK; |
5486 | reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25; |
5487 | CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg); |
5488 | |
5489 | reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC); |
5490 | reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK; |
5491 | reg |= BGE_CPMU_HST_ACC_MACCLK_6_25; |
5492 | CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg); |
5493 | } |
5494 | |
5495 | bge_sig_post_reset(sc, BGE_RESET_START); |
5496 | |
5497 | bge_chipinit(sc); |
5498 | |
5499 | /* |
5500 | * Init the various state machines, ring |
5501 | * control blocks and firmware. |
5502 | */ |
5503 | error = bge_blockinit(sc); |
5504 | if (error != 0) { |
		aprint_error_dev(sc->bge_dev, "initialization error %d\n",
		    error);
5507 | splx(s); |
5508 | return error; |
5509 | } |
5510 | |
5511 | ifp = &sc->ethercom.ec_if; |
5512 | |
5513 | /* 5718 step 25, 57XX step 54 */ |
5514 | /* Specify MTU. */ |
5515 | CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + |
5516 | ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); |
5517 | |
5518 | /* 5718 step 23 */ |
5519 | /* Load our MAC address. */ |
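	/*
	 * The station address is written as three big-endian 16-bit
	 * words: the first into BGE_MAC_ADDR1_LO, the remaining two
	 * packed into BGE_MAC_ADDR1_HI.
	 */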
5520 | m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); |
5521 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); |
5522 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); |
5523 | |
5524 | /* Enable or disable promiscuous mode as needed. */ |
5525 | if (ifp->if_flags & IFF_PROMISC) |
5526 | BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); |
5527 | else |
5528 | BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); |
5529 | |
5530 | /* Program multicast filter. */ |
5531 | bge_setmulti(sc); |
5532 | |
5533 | /* Init RX ring. */ |
5534 | bge_init_rx_ring_std(sc); |
5535 | |
	/*
	 * Workaround for a bug in 5705 ASIC rev A0.  Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
5541 | if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { |
5542 | uint32_t v, i; |
5543 | for (i = 0; i < 10; i++) { |
5544 | DELAY(20); |
5545 | v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); |
5546 | if (v == (MCLBYTES - ETHER_ALIGN)) |
5547 | break; |
5548 | } |
5549 | if (i == 10) |
5550 | aprint_error_dev(sc->bge_dev, |
			    "5705 A0 chip failed to load RX ring\n");
5552 | } |
5553 | |
5554 | /* Init jumbo RX ring. */ |
5555 | if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) |
5556 | bge_init_rx_ring_jumbo(sc); |
5557 | |
5558 | /* Init our RX return ring index */ |
5559 | sc->bge_rx_saved_considx = 0; |
5560 | |
5561 | /* Init TX ring. */ |
5562 | bge_init_tx_ring(sc); |
5563 | |
5564 | /* 5718 step 63, 57XX step 94 */ |
5565 | /* Enable TX MAC state machine lockup fix. */ |
5566 | mode = CSR_READ_4(sc, BGE_TX_MODE); |
5567 | if (BGE_IS_5755_PLUS(sc) || |
5568 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
5569 | mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; |
5570 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { |
5571 | mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); |
5572 | mode |= CSR_READ_4(sc, BGE_TX_MODE) & |
5573 | (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); |
5574 | } |
5575 | |
5576 | /* Turn on transmitter */ |
5577 | CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); |
5578 | /* 5718 step 64 */ |
5579 | DELAY(100); |
5580 | |
5581 | /* 5718 step 65, 57XX step 95 */ |
5582 | /* Turn on receiver */ |
5583 | mode = CSR_READ_4(sc, BGE_RX_MODE); |
5584 | if (BGE_IS_5755_PLUS(sc)) |
5585 | mode |= BGE_RXMODE_IPV6_ENABLE; |
5586 | CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); |
5587 | /* 5718 step 66 */ |
5588 | DELAY(10); |
5589 | |
5590 | /* 5718 step 12, 57XX step 37 */ |
	/*
	 * XXX Documents for the 5718 series and 577xx say the recommended
	 * value is 1, but tg3 sets it to 1 only on the 57765 series.
	 */
5595 | if (BGE_IS_57765_PLUS(sc)) |
5596 | reg = 1; |
5597 | else |
5598 | reg = 2; |
5599 | CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg); |
5600 | |
5601 | /* Tell firmware we're alive. */ |
5602 | BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
5603 | |
5604 | /* Enable host interrupts. */ |
5605 | BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); |
5606 | BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); |
5607 | bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0); |
5608 | |
5609 | if ((error = bge_ifmedia_upd(ifp)) != 0) |
5610 | goto out; |
5611 | |
5612 | ifp->if_flags |= IFF_RUNNING; |
5613 | ifp->if_flags &= ~IFF_OACTIVE; |
5614 | |
5615 | callout_reset(&sc->bge_timeout, hz, bge_tick, sc); |
5616 | |
5617 | out: |
5618 | sc->bge_if_flags = ifp->if_flags; |
5619 | splx(s); |
5620 | |
5621 | return error; |
5622 | } |
5623 | |
5624 | /* |
5625 | * Set media options. |
5626 | */ |
5627 | static int |
5628 | bge_ifmedia_upd(struct ifnet *ifp) |
5629 | { |
5630 | struct bge_softc *sc = ifp->if_softc; |
5631 | struct mii_data *mii = &sc->bge_mii; |
5632 | struct ifmedia *ifm = &sc->bge_ifmedia; |
5633 | int rc; |
5634 | |
5635 | /* If this is a 1000baseX NIC, enable the TBI port. */ |
5636 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
5637 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
5638 | return EINVAL; |
5639 | switch (IFM_SUBTYPE(ifm->ifm_media)) { |
5640 | case IFM_AUTO: |
5641 | /* |
5642 | * The BCM5704 ASIC appears to have a special |
5643 | * mechanism for programming the autoneg |
5644 | * advertisement registers in TBI mode. |
5645 | */ |
5646 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { |
5647 | uint32_t sgdig; |
5648 | sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); |
5649 | if (sgdig & BGE_SGDIGSTS_DONE) { |
5650 | CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); |
5651 | sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); |
5652 | sgdig |= BGE_SGDIGCFG_AUTO | |
5653 | BGE_SGDIGCFG_PAUSE_CAP | |
5654 | BGE_SGDIGCFG_ASYM_PAUSE; |
5655 | CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, |
5656 | sgdig | BGE_SGDIGCFG_SEND); |
5657 | DELAY(5); |
5658 | CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, |
5659 | sgdig); |
5660 | } |
5661 | } |
5662 | break; |
5663 | case IFM_1000_SX: |
5664 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { |
5665 | BGE_CLRBIT(sc, BGE_MAC_MODE, |
5666 | BGE_MACMODE_HALF_DUPLEX); |
5667 | } else { |
5668 | BGE_SETBIT(sc, BGE_MAC_MODE, |
5669 | BGE_MACMODE_HALF_DUPLEX); |
5670 | } |
5671 | DELAY(40); |
5672 | break; |
5673 | default: |
5674 | return EINVAL; |
5675 | } |
5676 | /* XXX 802.3x flow control for 1000BASE-SX */ |
5677 | return 0; |
5678 | } |
5679 | |
5680 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) && |
5681 | (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) { |
5682 | uint32_t reg; |
5683 | |
5684 | reg = CSR_READ_4(sc, BGE_CPMU_CTRL); |
5685 | if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) { |
5686 | reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY; |
5687 | CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); |
5688 | } |
5689 | } |
5690 | |
5691 | BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); |
5692 | if ((rc = mii_mediachg(mii)) == ENXIO) |
5693 | return 0; |
5694 | |
5695 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { |
5696 | uint32_t reg; |
5697 | |
5698 | reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK); |
5699 | if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK) |
5700 | == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) { |
5701 | reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK; |
5702 | delay(40); |
5703 | CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg); |
5704 | } |
5705 | } |
5706 | |
	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we do not get any further interrupts for
	 * link state changes and thus will never mark the link UP
	 * or be able to send in bge_start.  The only other way to
	 * get things working was to receive a packet and get an RX intr.
	 */
5715 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || |
5716 | sc->bge_flags & BGEF_IS_5788) |
5717 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); |
5718 | else |
5719 | BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); |
5720 | |
5721 | return rc; |
5722 | } |
5723 | |
5724 | /* |
5725 | * Report current media status. |
5726 | */ |
5727 | static void |
5728 | bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
5729 | { |
5730 | struct bge_softc *sc = ifp->if_softc; |
5731 | struct mii_data *mii = &sc->bge_mii; |
5732 | |
5733 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
5734 | ifmr->ifm_status = IFM_AVALID; |
5735 | ifmr->ifm_active = IFM_ETHER; |
5736 | if (CSR_READ_4(sc, BGE_MAC_STS) & |
5737 | BGE_MACSTAT_TBI_PCS_SYNCHED) |
5738 | ifmr->ifm_status |= IFM_ACTIVE; |
5739 | ifmr->ifm_active |= IFM_1000_SX; |
5740 | if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) |
5741 | ifmr->ifm_active |= IFM_HDX; |
5742 | else |
5743 | ifmr->ifm_active |= IFM_FDX; |
5744 | return; |
5745 | } |
5746 | |
5747 | mii_pollstat(mii); |
5748 | ifmr->ifm_status = mii->mii_media_status; |
5749 | ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | |
5750 | sc->bge_flowflags; |
5751 | } |
5752 | |
5753 | static int |
5754 | bge_ifflags_cb(struct ethercom *ec) |
5755 | { |
5756 | struct ifnet *ifp = &ec->ec_if; |
5757 | struct bge_softc *sc = ifp->if_softc; |
5758 | int change = ifp->if_flags ^ sc->bge_if_flags; |
5759 | |
5760 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) |
5761 | return ENETRESET; |
5762 | else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) |
5763 | return 0; |
5764 | |
5765 | if ((ifp->if_flags & IFF_PROMISC) == 0) |
5766 | BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); |
5767 | else |
5768 | BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); |
5769 | |
5770 | bge_setmulti(sc); |
5771 | |
5772 | sc->bge_if_flags = ifp->if_flags; |
5773 | return 0; |
5774 | } |
5775 | |
5776 | static int |
5777 | bge_ioctl(struct ifnet *ifp, u_long command, void *data) |
5778 | { |
5779 | struct bge_softc *sc = ifp->if_softc; |
5780 | struct ifreq *ifr = (struct ifreq *) data; |
5781 | int s, error = 0; |
5782 | struct mii_data *mii; |
5783 | |
5784 | s = splnet(); |
5785 | |
5786 | switch (command) { |
5787 | case SIOCSIFMEDIA: |
5788 | /* XXX Flow control is not supported for 1000BASE-SX */ |
5789 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
5790 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
5791 | sc->bge_flowflags = 0; |
5792 | } |
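		/*
		 * For instance, "ifconfig bgeN media 1000baseT mediaopt
		 * full-duplex,flowcontrol" (the ifmedia name for IFM_FLOW)
		 * should request both TXPAUSE and RXPAUSE below.
		 */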
5793 | |
5794 | /* Flow control requires full-duplex mode. */ |
5795 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || |
5796 | (ifr->ifr_media & IFM_FDX) == 0) { |
5797 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
5798 | } |
5799 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { |
5800 | if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { |
5801 | /* We can do both TXPAUSE and RXPAUSE. */ |
5802 | ifr->ifr_media |= |
5803 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
5804 | } |
5805 | sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; |
5806 | } |
5807 | /* FALLTHROUGH */ |
5808 | case SIOCGIFMEDIA: |
5809 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
5810 | error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, |
5811 | command); |
5812 | } else { |
5813 | mii = &sc->bge_mii; |
5814 | error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, |
5815 | command); |
5816 | } |
5817 | break; |
5818 | default: |
5819 | if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) |
5820 | break; |
5821 | |
5822 | error = 0; |
5823 | |
		if ((command == SIOCADDMULTI || command == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING))
			bge_setmulti(sc);
5828 | break; |
5829 | } |
5830 | |
5831 | splx(s); |
5832 | |
5833 | return error; |
5834 | } |
5835 | |
5836 | static void |
5837 | bge_watchdog(struct ifnet *ifp) |
5838 | { |
5839 | struct bge_softc *sc; |
5840 | |
5841 | sc = ifp->if_softc; |
5842 | |
	aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
5844 | |
5845 | ifp->if_flags &= ~IFF_RUNNING; |
5846 | bge_init(ifp); |
5847 | |
5848 | ifp->if_oerrors++; |
5849 | } |
5850 | |
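/*
 * Clear a state machine's enable bit and busy-wait (1000 polls of 100us,
 * i.e. up to 100ms) for the hardware to confirm that the block stopped.
 */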
5851 | static void |
5852 | bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) |
5853 | { |
5854 | int i; |
5855 | |
5856 | BGE_CLRBIT_FLUSH(sc, reg, bit); |
5857 | |
5858 | for (i = 0; i < 1000; i++) { |
5859 | delay(100); |
5860 | if ((CSR_READ_4(sc, reg) & bit) == 0) |
5861 | return; |
5862 | } |
5863 | |
	/*
	 * Don't print an error if the register is BGE_SRS_MODE; that
	 * block fails to stop in some environments (and once right
	 * after boot?).
	 */
	if (reg != BGE_SRS_MODE)
		aprint_error_dev(sc->bge_dev,
		    "block failed to stop: reg 0x%lx, bit 0x%08x\n",
		    (u_long)reg, bit);
5872 | } |
5873 | |
5874 | /* |
5875 | * Stop the adapter and free any mbufs allocated to the |
5876 | * RX and TX lists. |
5877 | */ |
5878 | static void |
5879 | bge_stop(struct ifnet *ifp, int disable) |
5880 | { |
5881 | struct bge_softc *sc = ifp->if_softc; |
5882 | |
5883 | if (disable) { |
5884 | sc->bge_detaching = 1; |
5885 | callout_halt(&sc->bge_timeout, NULL); |
5886 | } else |
5887 | callout_stop(&sc->bge_timeout); |
5888 | |
5889 | /* Disable host interrupts. */ |
5890 | BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); |
5891 | bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); |
5892 | |
5893 | /* |
5894 | * Tell firmware we're shutting down. |
5895 | */ |
5896 | bge_stop_fw(sc); |
5897 | bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); |
5898 | |
5899 | /* |
5900 | * Disable all of the receiver blocks. |
5901 | */ |
5902 | bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); |
5903 | bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); |
5904 | bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); |
5905 | if (BGE_IS_5700_FAMILY(sc)) |
5906 | bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); |
5907 | bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); |
5908 | bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); |
5909 | bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); |
5910 | |
5911 | /* |
5912 | * Disable all of the transmit blocks. |
5913 | */ |
5914 | bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); |
5915 | bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); |
5916 | bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); |
5917 | bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); |
5918 | bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); |
5919 | if (BGE_IS_5700_FAMILY(sc)) |
5920 | bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); |
5921 | bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); |
5922 | |
5923 | BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB); |
5924 | delay(40); |
5925 | |
5926 | bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); |
5927 | |
5928 | /* |
5929 | * Shut down all of the memory managers and related |
5930 | * state machines. |
5931 | */ |
5932 | /* 5718 step 5a,5b */ |
5933 | bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); |
5934 | bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); |
5935 | if (BGE_IS_5700_FAMILY(sc)) |
5936 | bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); |
5937 | |
5938 | /* 5718 step 5c,5d */ |
5939 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); |
5940 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); |
5941 | |
5942 | if (BGE_IS_5700_FAMILY(sc)) { |
5943 | bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); |
5944 | bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); |
5945 | } |
5946 | |
5947 | bge_reset(sc); |
5948 | bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); |
5949 | bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); |
5950 | |
5951 | /* |
5952 | * Keep the ASF firmware running if up. |
5953 | */ |
5954 | if (sc->bge_asf_mode & ASF_STACKUP) |
5955 | BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
5956 | else |
5957 | BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
5958 | |
5959 | /* Free the RX lists. */ |
5960 | bge_free_rx_ring_std(sc); |
5961 | |
5962 | /* Free jumbo RX list. */ |
5963 | if (BGE_IS_JUMBO_CAPABLE(sc)) |
5964 | bge_free_rx_ring_jumbo(sc); |
5965 | |
5966 | /* Free TX buffers. */ |
5967 | bge_free_tx_ring(sc); |
5968 | |
5969 | /* |
5970 | * Isolate/power down the PHY. |
5971 | */ |
5972 | if (!(sc->bge_flags & BGEF_FIBER_TBI)) |
5973 | mii_down(&sc->bge_mii); |
5974 | |
5975 | sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; |
5976 | |
5977 | /* Clear MAC's link state (PHY may still have link UP). */ |
5978 | BGE_STS_CLRBIT(sc, BGE_STS_LINK); |
5979 | |
5980 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
5981 | } |
5982 | |
5983 | static void |
5984 | bge_link_upd(struct bge_softc *sc) |
5985 | { |
5986 | struct ifnet *ifp = &sc->ethercom.ec_if; |
5987 | struct mii_data *mii = &sc->bge_mii; |
5988 | uint32_t status; |
5989 | int link; |
5990 | |
5991 | /* Clear 'pending link event' flag */ |
5992 | BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); |
5993 | |
5994 | /* |
5995 | * Process link state changes. |
5996 | * Grrr. The link status word in the status block does |
5997 | * not work correctly on the BCM5700 rev AX and BX chips, |
5998 | * according to all available information. Hence, we have |
5999 | * to enable MII interrupts in order to properly obtain |
6000 | * async link changes. Unfortunately, this also means that |
6001 | * we have to read the MAC status register to detect link |
6002 | * changes, thereby adding an additional register access to |
6003 | * the interrupt handler. |
6004 | */ |
6005 | |
6006 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { |
6007 | status = CSR_READ_4(sc, BGE_MAC_STS); |
6008 | if (status & BGE_MACSTAT_MI_INTERRUPT) { |
6009 | mii_pollstat(mii); |
6010 | |
6011 | if (!BGE_STS_BIT(sc, BGE_STS_LINK) && |
6012 | mii->mii_media_status & IFM_ACTIVE && |
6013 | IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) |
6014 | BGE_STS_SETBIT(sc, BGE_STS_LINK); |
6015 | else if (BGE_STS_BIT(sc, BGE_STS_LINK) && |
6016 | (!(mii->mii_media_status & IFM_ACTIVE) || |
6017 | IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) |
6018 | BGE_STS_CLRBIT(sc, BGE_STS_LINK); |
6019 | |
6020 | /* Clear the interrupt */ |
6021 | CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, |
6022 | BGE_EVTENB_MI_INTERRUPT); |
6023 | bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, |
6024 | BRGPHY_MII_ISR); |
6025 | bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, |
6026 | BRGPHY_MII_IMR, BRGPHY_INTRS); |
6027 | } |
6028 | return; |
6029 | } |
6030 | |
6031 | if (sc->bge_flags & BGEF_FIBER_TBI) { |
6032 | status = CSR_READ_4(sc, BGE_MAC_STS); |
6033 | if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { |
6034 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { |
6035 | BGE_STS_SETBIT(sc, BGE_STS_LINK); |
6036 | if (BGE_ASICREV(sc->bge_chipid) |
6037 | == BGE_ASICREV_BCM5704) { |
6038 | BGE_CLRBIT(sc, BGE_MAC_MODE, |
6039 | BGE_MACMODE_TBI_SEND_CFGS); |
6040 | DELAY(40); |
6041 | } |
6042 | CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); |
6043 | if_link_state_change(ifp, LINK_STATE_UP); |
6044 | } |
6045 | } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { |
6046 | BGE_STS_CLRBIT(sc, BGE_STS_LINK); |
6047 | if_link_state_change(ifp, LINK_STATE_DOWN); |
6048 | } |
6049 | } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { |
		/*
		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in the status word always set.  Work around this bug by
		 * reading the PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;
6057 | |
6058 | if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { |
6059 | mii_pollstat(mii); |
6060 | |
6061 | if (!BGE_STS_BIT(sc, BGE_STS_LINK) && |
6062 | mii->mii_media_status & IFM_ACTIVE && |
6063 | IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) |
6064 | BGE_STS_SETBIT(sc, BGE_STS_LINK); |
6065 | else if (BGE_STS_BIT(sc, BGE_STS_LINK) && |
6066 | (!(mii->mii_media_status & IFM_ACTIVE) || |
6067 | IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) |
6068 | BGE_STS_CLRBIT(sc, BGE_STS_LINK); |
6069 | } |
6070 | } else { |
6071 | /* |
6072 | * For controllers that call mii_tick, we have to poll |
6073 | * link status. |
6074 | */ |
6075 | mii_pollstat(mii); |
6076 | } |
6077 | |
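	/*
	 * On 5784 AX parts the CPMU-reported MAC clock apparently
	 * determines the BGE_MISCCFG_TIMER_PRESCALER value written below,
	 * keeping the timer rate stable across MAC clock switches.
	 */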
6078 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { |
6079 | uint32_t reg, scale; |
6080 | |
6081 | reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) & |
6082 | BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK; |
6083 | if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5) |
6084 | scale = 65; |
6085 | else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25) |
6086 | scale = 6; |
6087 | else |
6088 | scale = 12; |
6089 | |
6090 | reg = CSR_READ_4(sc, BGE_MISC_CFG) & |
6091 | ~BGE_MISCCFG_TIMER_PRESCALER; |
6092 | reg |= scale << 1; |
6093 | CSR_WRITE_4(sc, BGE_MISC_CFG, reg); |
6094 | } |
6095 | /* Clear the attention */ |
6096 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| |
6097 | BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| |
6098 | BGE_MACSTAT_LINK_CHANGED); |
6099 | } |
6100 | |
6101 | static int |
6102 | bge_sysctl_verify(SYSCTLFN_ARGS) |
6103 | { |
6104 | int error, t; |
6105 | struct sysctlnode node; |
6106 | |
6107 | node = *rnode; |
6108 | t = *(int*)rnode->sysctl_data; |
6109 | node.sysctl_data = &t; |
6110 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
6111 | if (error || newp == NULL) |
6112 | return error; |
6113 | |
6114 | #if 0 |
	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
	    node.sysctl_num, rnode->sysctl_num));
6117 | #endif |
6118 | |
6119 | if (node.sysctl_num == bge_rxthresh_nodenum) { |
6120 | if (t < 0 || t >= NBGE_RX_THRESH) |
6121 | return EINVAL; |
6122 | bge_update_all_threshes(t); |
6123 | } else |
6124 | return EINVAL; |
6125 | |
6126 | *(int*)rnode->sysctl_data = t; |
6127 | |
6128 | return 0; |
6129 | } |
6130 | |
6131 | /* |
6132 | * Set up sysctl(3) MIB, hw.bge.*. |
6133 | */ |
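/*
 * The mitigation level created here can be tuned at run time with,
 * e.g., "sysctl -w hw.bge.rx_lvl=N"; bge_sysctl_verify() rejects values
 * outside 0 <= N < NBGE_RX_THRESH.
 */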
6134 | static void |
6135 | bge_sysctl_init(struct bge_softc *sc) |
6136 | { |
6137 | int rc, bge_root_num; |
6138 | const struct sysctlnode *node; |
6139 | |
6140 | if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, |
	    0, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
6144 | goto out; |
6145 | } |
6146 | |
6147 | bge_root_num = node->sysctl_num; |
6148 | |
6149 | /* BGE Rx interrupt mitigation level */ |
6150 | if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, |
6151 | CTLFLAG_READWRITE, |
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
6154 | bge_sysctl_verify, 0, |
6155 | &bge_rx_thresh_lvl, |
6156 | 0, CTL_HW, bge_root_num, CTL_CREATE, |
6157 | CTL_EOL)) != 0) { |
6158 | goto out; |
6159 | } |
6160 | |
6161 | bge_rxthresh_nodenum = node->sysctl_num; |
6162 | |
6163 | return; |
6164 | |
6165 | out: |
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
6167 | } |
6168 | |
6169 | #ifdef BGE_DEBUG |
6170 | void |
6171 | bge_debug_info(struct bge_softc *sc) |
6172 | { |
6173 | |
	printf("Hardware Flags:\n");
	if (BGE_IS_57765_PLUS(sc))
		printf(" - 57765 Plus\n");
	if (BGE_IS_5717_PLUS(sc))
		printf(" - 5717 Plus\n");
	if (BGE_IS_5755_PLUS(sc))
		printf(" - 5755 Plus\n");
	if (BGE_IS_575X_PLUS(sc))
		printf(" - 575X Plus\n");
	if (BGE_IS_5705_PLUS(sc))
		printf(" - 5705 Plus\n");
	if (BGE_IS_5714_FAMILY(sc))
		printf(" - 5714 Family\n");
	if (BGE_IS_5700_FAMILY(sc))
		printf(" - 5700 Family\n");
	if (sc->bge_flags & BGEF_IS_5788)
		printf(" - 5788\n");
	if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
		printf(" - Supports Jumbo Frames\n");
	if (sc->bge_flags & BGEF_NO_EEPROM)
		printf(" - No EEPROM\n");
	if (sc->bge_flags & BGEF_PCIX)
		printf(" - PCI-X Bus\n");
	if (sc->bge_flags & BGEF_PCIE)
		printf(" - PCI Express Bus\n");
	if (sc->bge_flags & BGEF_RX_ALIGNBUG)
		printf(" - RX Alignment Bug\n");
	if (sc->bge_flags & BGEF_APE)
		printf(" - APE\n");
	if (sc->bge_flags & BGEF_CPMU_PRESENT)
		printf(" - CPMU\n");
	if (sc->bge_flags & BGEF_TSO)
		printf(" - TSO\n");
	if (sc->bge_flags & BGEF_TAGGED_STATUS)
		printf(" - TAGGED_STATUS\n");

	/* PHY related */
	if (sc->bge_phy_flags & BGEPHYF_NO_3LED)
		printf(" - No 3 LEDs\n");
	if (sc->bge_phy_flags & BGEPHYF_CRC_BUG)
		printf(" - CRC bug\n");
	if (sc->bge_phy_flags & BGEPHYF_ADC_BUG)
		printf(" - ADC bug\n");
	if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG)
		printf(" - 5704 A0 bug\n");
	if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG)
		printf(" - jitter bug\n");
	if (sc->bge_phy_flags & BGEPHYF_BER_BUG)
		printf(" - BER bug\n");
	if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM)
		printf(" - adjust trim\n");
	if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED)
		printf(" - no wirespeed\n");

	/* ASF related */
	if (sc->bge_asf_mode & ASF_ENABLE)
		printf(" - ASF enable\n");
	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE)
		printf(" - ASF new handshake\n");
	if (sc->bge_asf_mode & ASF_STACKUP)
		printf(" - ASF stackup\n");
6235 | } |
6236 | #endif /* BGE_DEBUG */ |
6237 | |
6238 | static int |
6239 | bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) |
6240 | { |
6241 | prop_dictionary_t dict; |
6242 | prop_data_t ea; |
6243 | |
6244 | if ((sc->bge_flags & BGEF_NO_EEPROM) == 0) |
6245 | return 1; |
6246 | |
6247 | dict = device_properties(sc->bge_dev); |
	ea = prop_dictionary_get(dict, "mac-address");
6249 | if (ea != NULL) { |
6250 | KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); |
6251 | KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); |
6252 | memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); |
6253 | return 0; |
6254 | } |
6255 | |
6256 | return 1; |
6257 | } |
6258 | |
6259 | static int |
6260 | bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) |
6261 | { |
6262 | uint32_t mac_addr; |
6263 | |
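	/*
	 * SRAM holds a usable station address only when the high word
	 * carries the 0x484b ("HK" in ASCII) signature, presumably
	 * planted there by the bootcode.
	 */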
6264 | mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); |
6265 | if ((mac_addr >> 16) == 0x484b) { |
6266 | ether_addr[0] = (uint8_t)(mac_addr >> 8); |
6267 | ether_addr[1] = (uint8_t)mac_addr; |
6268 | mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); |
6269 | ether_addr[2] = (uint8_t)(mac_addr >> 24); |
6270 | ether_addr[3] = (uint8_t)(mac_addr >> 16); |
6271 | ether_addr[4] = (uint8_t)(mac_addr >> 8); |
6272 | ether_addr[5] = (uint8_t)mac_addr; |
6273 | return 0; |
6274 | } |
6275 | return 1; |
6276 | } |
6277 | |
6278 | static int |
6279 | bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) |
6280 | { |
6281 | int mac_offset = BGE_EE_MAC_OFFSET; |
6282 | |
6283 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
6284 | mac_offset = BGE_EE_MAC_OFFSET_5906; |
6285 | |
6286 | return (bge_read_nvram(sc, ether_addr, mac_offset + 2, |
6287 | ETHER_ADDR_LEN)); |
6288 | } |
6289 | |
6290 | static int |
6291 | bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) |
6292 | { |
6293 | |
6294 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
6295 | return 1; |
6296 | |
6297 | return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, |
6298 | ETHER_ADDR_LEN)); |
6299 | } |
6300 | |
6301 | static int |
6302 | bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) |
6303 | { |
6304 | static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { |
6305 | /* NOTE: Order is critical */ |
6306 | bge_get_eaddr_fw, |
6307 | bge_get_eaddr_mem, |
6308 | bge_get_eaddr_nvram, |
6309 | bge_get_eaddr_eeprom, |
6310 | NULL |
6311 | }; |
6312 | const bge_eaddr_fcn_t *func; |
6313 | |
6314 | for (func = bge_eaddr_funcs; *func != NULL; ++func) { |
6315 | if ((*func)(sc, eaddr) == 0) |
6316 | break; |
6317 | } |
6318 | return (*func == NULL ? ENXIO : 0); |
6319 | } |
6320 | |