1 | /* $NetBSD: if_sip.c,v 1.163 2016/07/14 10:19:06 msaitoh Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /*- |
33 | * Copyright (c) 1999 Network Computer, Inc. |
34 | * All rights reserved. |
35 | * |
36 | * Redistribution and use in source and binary forms, with or without |
37 | * modification, are permitted provided that the following conditions |
38 | * are met: |
39 | * 1. Redistributions of source code must retain the above copyright |
40 | * notice, this list of conditions and the following disclaimer. |
41 | * 2. Redistributions in binary form must reproduce the above copyright |
42 | * notice, this list of conditions and the following disclaimer in the |
43 | * documentation and/or other materials provided with the distribution. |
44 | * 3. Neither the name of Network Computer, Inc. nor the names of its |
45 | * contributors may be used to endorse or promote products derived |
46 | * from this software without specific prior written permission. |
47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS |
49 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
50 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
51 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
52 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
53 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
54 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
55 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
56 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
57 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
58 | * POSSIBILITY OF SUCH DAMAGE. |
59 | */ |
60 | |
61 | /* |
62 | * Device driver for the Silicon Integrated Systems SiS 900, |
63 | * SiS 7016 10/100, National Semiconductor DP83815 10/100, and |
64 | * National Semiconductor DP83820 10/100/1000 PCI Ethernet |
65 | * controllers. |
66 | * |
67 | * Originally written to support the SiS 900 by Jason R. Thorpe for |
68 | * Network Computer, Inc. |
69 | * |
70 | * TODO: |
71 | * |
72 | * - Reduce the Rx interrupt load. |
73 | */ |
74 | |
75 | #include <sys/cdefs.h> |
76 | __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.163 2016/07/14 10:19:06 msaitoh Exp $" ); |
77 | |
78 | |
79 | |
80 | #include <sys/param.h> |
81 | #include <sys/systm.h> |
82 | #include <sys/callout.h> |
83 | #include <sys/mbuf.h> |
84 | #include <sys/malloc.h> |
85 | #include <sys/kernel.h> |
86 | #include <sys/socket.h> |
87 | #include <sys/ioctl.h> |
88 | #include <sys/errno.h> |
89 | #include <sys/device.h> |
90 | #include <sys/queue.h> |
91 | |
92 | #include <sys/rndsource.h> |
93 | |
94 | #include <net/if.h> |
95 | #include <net/if_dl.h> |
96 | #include <net/if_media.h> |
97 | #include <net/if_ether.h> |
98 | |
99 | #include <net/bpf.h> |
100 | |
101 | #include <sys/bus.h> |
102 | #include <sys/intr.h> |
103 | #include <machine/endian.h> |
104 | |
105 | #include <dev/mii/mii.h> |
106 | #include <dev/mii/miivar.h> |
107 | #include <dev/mii/mii_bitbang.h> |
108 | |
109 | #include <dev/pci/pcireg.h> |
110 | #include <dev/pci/pcivar.h> |
111 | #include <dev/pci/pcidevs.h> |
112 | |
113 | #include <dev/pci/if_sipreg.h> |
114 | |
115 | /* |
116 | * Transmit descriptor list size. This is arbitrary, but allocate |
117 | * enough descriptors for 128 pending transmissions, and 8 segments |
118 | * per packet (64 for DP83820 for jumbo frames). |
119 | * |
120 | * This MUST work out to a power of 2. |
121 | */ |
122 | #define GSIP_NTXSEGS_ALLOC 16 |
123 | #define SIP_NTXSEGS_ALLOC 8 |
124 | |
125 | #define SIP_TXQUEUELEN 256 |
126 | #define MAX_SIP_NTXDESC \ |
127 | (SIP_TXQUEUELEN * MAX(SIP_NTXSEGS_ALLOC, GSIP_NTXSEGS_ALLOC)) |
128 | |
129 | /* |
130 | * Receive descriptor list size. We have one Rx buffer per incoming |
131 | * packet, so this logic is a little simpler. |
132 | * |
133 | * Actually, on the DP83820, we allow the packet to consume more than |
134 | * one buffer, in order to support jumbo Ethernet frames. In that |
135 | * case, a packet may consume up to 5 buffers (assuming a 2048 byte |
136 | * mbuf cluster). 256 receive buffers is only 51 maximum size packets, |
137 | * so we'd better be quick about handling receive interrupts. |
138 | */ |
139 | #define GSIP_NRXDESC 256 |
140 | #define SIP_NRXDESC 128 |
141 | |
142 | #define MAX_SIP_NRXDESC MAX(GSIP_NRXDESC, SIP_NRXDESC) |
143 | |
144 | /* |
145 | * Control structures are DMA'd to the SiS900 chip. We allocate them in |
146 | * a single clump that maps to a single DMA segment to make several things |
147 | * easier. |
148 | */ |
struct sip_control_data {
	/*
	 * The transmit descriptors.  Sized for the largest variant
	 * (MAX_SIP_NTXDESC); the per-variant count in use is smaller.
	 */
	struct sip_desc scd_txdescs[MAX_SIP_NTXDESC];

	/*
	 * The receive descriptors.  Likewise sized for the largest
	 * variant (MAX_SIP_NRXDESC).
	 */
	struct sip_desc scd_rxdescs[MAX_SIP_NRXDESC];
};
160 | |
/* Byte offset of member x within the control-data clump. */
#define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
/* Byte offset of Tx/Rx descriptor x within the control-data clump. */
#define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
#define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
164 | |
165 | /* |
166 | * Software state for transmit jobs. |
167 | */ |
struct sip_txsoft {
	struct mbuf *txs_mbuf; /* head of our mbuf chain */
	bus_dmamap_t txs_dmamap; /* our DMA map */
	int txs_firstdesc; /* first descriptor in packet */
	int txs_lastdesc; /* last descriptor in packet */
	SIMPLEQ_ENTRY(sip_txsoft) txs_q; /* link on free/dirty queue */
};

/* Queue of sip_txsoft entries (used for both free and dirty lists). */
SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
177 | |
178 | /* |
179 | * Software state for receive jobs. |
180 | */ |
struct sip_rxsoft {
	struct mbuf *rxs_mbuf; /* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap; /* our DMA map */
};
185 | |
/*
 * Attach progress markers, listed in reverse order of acquisition.
 * sipcom_do_detach() switches on the stage reached and falls through,
 * releasing each resource acquired before the failure point.
 */
enum sip_attach_stage {
	  SIP_ATTACH_FIN = 0	/* attach completed successfully */
	, SIP_ATTACH_CREATE_RXMAP	/* Rx DMA maps created */
	, SIP_ATTACH_CREATE_TXMAP	/* Tx DMA maps created */
	, SIP_ATTACH_LOAD_MAP	/* control-data map loaded */
	, SIP_ATTACH_CREATE_MAP	/* control-data map created */
	, SIP_ATTACH_MAP_MEM	/* control-data memory mapped */
	, SIP_ATTACH_ALLOC_MEM	/* control-data memory allocated */
	, SIP_ATTACH_INTR	/* interrupt established */
	, SIP_ATTACH_MAP	/* registers mapped */
};
197 | |
198 | /* |
199 | * Software state per device. |
200 | */ |
struct sip_softc {
	device_t sc_dev;		/* generic device information */
	device_suspensor_t sc_suspensor; /* PMF suspensor handle */
	pmf_qual_t sc_qual;		/* PMF qualifier */

	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_sz;		/* bus space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	bus_dma_segment_t sc_seg;	/* control-data DMA segment */
	struct ethercom sc_ethercom;	/* ethernet common data */

	const struct sip_product *sc_model; /* which model are we? */
	int sc_gigabit;			/* 1: 83820, 0: other */
	int sc_rev;			/* chip revision */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
/* Device-visible DMA address of the control data. */
#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
	struct sip_rxsoft sc_rxsoft[MAX_SIP_NRXDESC];

	/*
	 * Control data structures (shared with the chip via DMA).
	 */
	struct sip_control_data *sc_control_data;
#define sc_txdescs sc_control_data->scd_txdescs
#define sc_rxdescs sc_control_data->scd_rxdescs

#ifdef SIP_EVENT_COUNTERS
	/*
	 * Event counters.
	 */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdintr;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txiintr;	/* Tx idle interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_hiberr;	/* HIBERR interrupts */
	struct evcnt sc_ev_rxpause;	/* PAUSE received */
	/* DP83820 only */
	struct evcnt sc_ev_txpause;	/* PAUSE transmitted */
	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtcpsum;	/* TCP checksums checked in-bound */
	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtcpsum;	/* TCP checksums comp. out-bound */
	struct evcnt sc_ev_txudpsum;	/* UDP checksums comp. out-bound */
#endif /* SIP_EVENT_COUNTERS */

	u_int32_t sc_txcfg;		/* prototype TXCFG register */
	u_int32_t sc_rxcfg;		/* prototype RXCFG register */
	u_int32_t sc_imr;		/* prototype IMR register */
	u_int32_t sc_rfcr;		/* prototype RFCR register */

	u_int32_t sc_cfg;		/* prototype CFG register */

	u_int32_t sc_gpior;		/* prototype GPIOR register */

	u_int32_t sc_tx_fill_thresh;	/* transmit fill threshold */
	u_int32_t sc_tx_drain_thresh;	/* transmit drain threshold */

	u_int32_t sc_rx_drain_thresh;	/* receive drain threshold */

	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_rx_flow_thresh;	/* Rx FIFO threshold for flow control */
	int sc_paused;			/* paused indication */

	int sc_txfree;		/* number of free Tx descriptors */
	int sc_txnext;		/* next ready Tx descriptor */
	int sc_txwin;		/* Tx descriptors since last intr */

	struct sip_txsq sc_txfreeq;	/* free Tx descsofts */
	struct sip_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	/* values of interface state at last init */
	struct {
		/* if_capenable */
		uint64_t if_capenable;
		/* ec_capenable */
		int ec_capenable;
		/* VLAN_ATTACHED */
		int is_vlan;
	} sc_prev;

	short sc_if_flags;		/* if_flags at last ioctl */

	int sc_rxptr;		/* next ready Rx descriptor/descsoft */
	int sc_rxdiscard;	/* nonzero: discarding current Rx packet */
	int sc_rxlen;		/* length of current Rx chain (83820) */
	struct mbuf *sc_rxhead;	/* head of current Rx chain (83820) */
	struct mbuf *sc_rxtail;	/* tail of current Rx chain (83820) */
	struct mbuf **sc_rxtailp; /* where to link the next Rx mbuf */

	int sc_ntxdesc;		/* number of Tx descriptors in use */
	int sc_ntxdesc_mask;	/* sc_ntxdesc - 1; ring-wrap mask */

	int sc_nrxdesc_mask;	/* Rx ring-wrap mask */

	/* Per-variant parameters (sip_parm or gsip_parm below). */
	const struct sip_parm {
		const struct sip_regs {
			int r_rxcfg;	/* RXCFG register offset */
			int r_txcfg;	/* TXCFG register offset */
		} p_regs;

		const struct sip_bits {
			uint32_t b_txcfg_mxdma_8;
			uint32_t b_txcfg_mxdma_16;
			uint32_t b_txcfg_mxdma_32;
			uint32_t b_txcfg_mxdma_64;
			uint32_t b_txcfg_mxdma_128;
			uint32_t b_txcfg_mxdma_256;
			uint32_t b_txcfg_mxdma_512;
			uint32_t b_txcfg_flth_mask;
			uint32_t b_txcfg_drth_mask;

			uint32_t b_rxcfg_mxdma_8;
			uint32_t b_rxcfg_mxdma_16;
			uint32_t b_rxcfg_mxdma_32;
			uint32_t b_rxcfg_mxdma_64;
			uint32_t b_rxcfg_mxdma_128;
			uint32_t b_rxcfg_mxdma_256;
			uint32_t b_rxcfg_mxdma_512;

			uint32_t b_isr_txrcmp;
			uint32_t b_isr_rxrcmp;
			uint32_t b_isr_dperr;
			uint32_t b_isr_sserr;
			uint32_t b_isr_rmabt;
			uint32_t b_isr_rtabt;

			uint32_t b_cmdsts_size_mask;
		} p_bits;
		int p_filtmem;		/* Rx filter memory base */
		int p_rxbuf_len;	/* Rx buffer length */
		bus_size_t p_tx_dmamap_size; /* max Tx DMA map size */
		int p_ntxsegs;		/* max DMA segments per Tx packet */
		int p_ntxsegs_alloc;	/* Tx descriptors per queue entry */
		int p_nrxdesc;		/* number of Rx descriptors */
	} *sc_parm;

	void (*sc_rxintr)(struct sip_softc *); /* variant Rx interrupt handler */

	krndsource_t rnd_source;	/* random source */
};
357 | |
/* Shortcuts to the per-variant bit and register tables. */
#define sc_bits sc_parm->p_bits
#define sc_regs sc_parm->p_regs

/*
 * Per-variant parameter tables: sip_parm covers the SiS 900/7016 and
 * DP83815; gsip_parm covers the gigabit DP83820.
 */
static const struct sip_parm sip_parm = {
	.p_filtmem = OTHER_RFCR_NS_RFADDR_FILTMEM
	, .p_rxbuf_len = MCLBYTES - 1 /* field width */
	, .p_tx_dmamap_size = MCLBYTES
	, .p_ntxsegs = 16
	, .p_ntxsegs_alloc = SIP_NTXSEGS_ALLOC
	, .p_nrxdesc = SIP_NRXDESC
	, .p_bits = {
	.b_txcfg_mxdma_8 = 0x00200000 /* 8 bytes */
	, .b_txcfg_mxdma_16 = 0x00300000 /* 16 bytes */
	, .b_txcfg_mxdma_32 = 0x00400000 /* 32 bytes */
	, .b_txcfg_mxdma_64 = 0x00500000 /* 64 bytes */
	, .b_txcfg_mxdma_128 = 0x00600000 /* 128 bytes */
	, .b_txcfg_mxdma_256 = 0x00700000 /* 256 bytes */
	, .b_txcfg_mxdma_512 = 0x00000000 /* 512 bytes */
	, .b_txcfg_flth_mask = 0x00003f00 /* Tx fill threshold */
	, .b_txcfg_drth_mask = 0x0000003f /* Tx drain threshold */

	, .b_rxcfg_mxdma_8 = 0x00200000 /* 8 bytes */
	, .b_rxcfg_mxdma_16 = 0x00300000 /* 16 bytes */
	, .b_rxcfg_mxdma_32 = 0x00400000 /* 32 bytes */
	, .b_rxcfg_mxdma_64 = 0x00500000 /* 64 bytes */
	, .b_rxcfg_mxdma_128 = 0x00600000 /* 128 bytes */
	, .b_rxcfg_mxdma_256 = 0x00700000 /* 256 bytes */
	, .b_rxcfg_mxdma_512 = 0x00000000 /* 512 bytes */

	, .b_isr_txrcmp = 0x02000000 /* transmit reset complete */
	, .b_isr_rxrcmp = 0x01000000 /* receive reset complete */
	, .b_isr_dperr = 0x00800000 /* detected parity error */
	, .b_isr_sserr = 0x00400000 /* signalled system error */
	, .b_isr_rmabt = 0x00200000 /* received master abort */
	, .b_isr_rtabt = 0x00100000 /* received target abort */
	, .b_cmdsts_size_mask = OTHER_CMDSTS_SIZE_MASK
	}
	, .p_regs = {
		.r_rxcfg = OTHER_SIP_RXCFG,
		.r_txcfg = OTHER_SIP_TXCFG
	}
}, gsip_parm = {
	.p_filtmem = DP83820_RFCR_NS_RFADDR_FILTMEM
	, .p_rxbuf_len = MCLBYTES - 8
	, .p_tx_dmamap_size = ETHER_MAX_LEN_JUMBO
	, .p_ntxsegs = 64
	, .p_ntxsegs_alloc = GSIP_NTXSEGS_ALLOC
	, .p_nrxdesc = GSIP_NRXDESC
	, .p_bits = {
	.b_txcfg_mxdma_8 = 0x00100000 /* 8 bytes */
	, .b_txcfg_mxdma_16 = 0x00200000 /* 16 bytes */
	, .b_txcfg_mxdma_32 = 0x00300000 /* 32 bytes */
	, .b_txcfg_mxdma_64 = 0x00400000 /* 64 bytes */
	, .b_txcfg_mxdma_128 = 0x00500000 /* 128 bytes */
	, .b_txcfg_mxdma_256 = 0x00600000 /* 256 bytes */
	, .b_txcfg_mxdma_512 = 0x00700000 /* 512 bytes */
	, .b_txcfg_flth_mask = 0x0000ff00 /* Tx fill threshold */
	, .b_txcfg_drth_mask = 0x000000ff /* Tx drain threshold */

	, .b_rxcfg_mxdma_8 = 0x00100000 /* 8 bytes */
	, .b_rxcfg_mxdma_16 = 0x00200000 /* 16 bytes */
	, .b_rxcfg_mxdma_32 = 0x00300000 /* 32 bytes */
	, .b_rxcfg_mxdma_64 = 0x00400000 /* 64 bytes */
	, .b_rxcfg_mxdma_128 = 0x00500000 /* 128 bytes */
	, .b_rxcfg_mxdma_256 = 0x00600000 /* 256 bytes */
	, .b_rxcfg_mxdma_512 = 0x00700000 /* 512 bytes */

	, .b_isr_txrcmp = 0x00400000 /* transmit reset complete */
	, .b_isr_rxrcmp = 0x00200000 /* receive reset complete */
	, .b_isr_dperr = 0x00100000 /* detected parity error */
	, .b_isr_sserr = 0x00080000 /* signalled system error */
	, .b_isr_rmabt = 0x00040000 /* received master abort */
	, .b_isr_rtabt = 0x00020000 /* received target abort */
	, .b_cmdsts_size_mask = DP83820_CMDSTS_SIZE_MASK
	}
	, .p_regs = {
		.r_rxcfg = DP83820_SIP_RXCFG,
		.r_txcfg = DP83820_SIP_TXCFG
	}
};
438 | |
439 | static inline int |
440 | sip_nexttx(const struct sip_softc *sc, int x) |
441 | { |
442 | return (x + 1) & sc->sc_ntxdesc_mask; |
443 | } |
444 | |
445 | static inline int |
446 | sip_nextrx(const struct sip_softc *sc, int x) |
447 | { |
448 | return (x + 1) & sc->sc_nrxdesc_mask; |
449 | } |
450 | |
451 | /* 83820 only */ |
452 | static inline void |
453 | sip_rxchain_reset(struct sip_softc *sc) |
454 | { |
455 | sc->sc_rxtailp = &sc->sc_rxhead; |
456 | *sc->sc_rxtailp = NULL; |
457 | sc->sc_rxlen = 0; |
458 | } |
459 | |
460 | /* 83820 only */ |
461 | static inline void |
462 | sip_rxchain_link(struct sip_softc *sc, struct mbuf *m) |
463 | { |
464 | *sc->sc_rxtailp = sc->sc_rxtail = m; |
465 | sc->sc_rxtailp = &m->m_next; |
466 | } |
467 | |
/* Bump an event counter; compiles to nothing without SIP_EVENT_COUNTERS. */
#ifdef SIP_EVENT_COUNTERS
#define SIP_EVCNT_INCR(ev) (ev)->ev_count++
#else
#define SIP_EVCNT_INCR(ev) /* nothing */
#endif

/* Device-visible DMA address of Tx/Rx descriptor x. */
#define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
#define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
476 | |
477 | static inline void |
478 | sip_cdtxsync(struct sip_softc *sc, const int x0, const int n0, const int ops) |
479 | { |
480 | int x, n; |
481 | |
482 | x = x0; |
483 | n = n0; |
484 | |
485 | /* If it will wrap around, sync to the end of the ring. */ |
486 | if (x + n > sc->sc_ntxdesc) { |
487 | bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, |
488 | SIP_CDTXOFF(x), sizeof(struct sip_desc) * |
489 | (sc->sc_ntxdesc - x), ops); |
490 | n -= (sc->sc_ntxdesc - x); |
491 | x = 0; |
492 | } |
493 | |
494 | /* Now sync whatever is left. */ |
495 | bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, |
496 | SIP_CDTXOFF(x), sizeof(struct sip_desc) * n, ops); |
497 | } |
498 | |
499 | static inline void |
500 | sip_cdrxsync(struct sip_softc *sc, int x, int ops) |
501 | { |
502 | bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, |
503 | SIP_CDRXOFF(x), sizeof(struct sip_desc), ops); |
504 | } |
505 | |
506 | #if 0 |
507 | #ifdef DP83820 |
508 | u_int32_t sipd_bufptr; /* pointer to DMA segment */ |
509 | u_int32_t sipd_cmdsts; /* command/status word */ |
510 | #else |
511 | u_int32_t sipd_cmdsts; /* command/status word */ |
512 | u_int32_t sipd_bufptr; /* pointer to DMA segment */ |
513 | #endif /* DP83820 */ |
514 | #endif /* 0 */ |
515 | |
516 | static inline volatile uint32_t * |
517 | sipd_cmdsts(struct sip_softc *sc, struct sip_desc *sipd) |
518 | { |
519 | return &sipd->sipd_cbs[(sc->sc_gigabit) ? 1 : 0]; |
520 | } |
521 | |
522 | static inline volatile uint32_t * |
523 | sipd_bufptr(struct sip_softc *sc, struct sip_desc *sipd) |
524 | { |
525 | return &sipd->sipd_cbs[(sc->sc_gigabit) ? 0 : 1]; |
526 | } |
527 | |
/*
 * sip_init_rxdesc:
 *
 *	(Re)initialize Rx descriptor x: link it to the next ring slot,
 *	point it at its mbuf's DMA segment, mark it ready for the chip,
 *	and sync the descriptor so the device sees the update.
 */
static inline void
sip_init_rxdesc(struct sip_softc *sc, int x)
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[x];
	struct sip_desc *sipd = &sc->sc_rxdescs[x];

	sipd->sipd_link = htole32(SIP_CDRXADDR(sc, sip_nextrx(sc, x)));
	*sipd_bufptr(sc, sipd) = htole32(rxs->rxs_dmamap->dm_segs[0].ds_addr);
	/* Interrupt on completion; buffer length clipped to the
	 * variant's cmdsts size field. */
	*sipd_cmdsts(sc, sipd) = htole32(CMDSTS_INTR |
	    (sc->sc_parm->p_rxbuf_len & sc->sc_bits.b_cmdsts_size_mask));
	sipd->sipd_extsts = 0;
	sip_cdrxsync(sc, x, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
541 | |
/* True if sc matches vendor v, product p, and exact revision r. */
#define SIP_CHIP_VERS(sc, v, p, r) \
	((sc)->sc_model->sip_vendor == (v) && \
	 (sc)->sc_model->sip_product == (p) && \
	 (sc)->sc_rev == (r))

/* True if sc matches vendor v and product p, any revision. */
#define SIP_CHIP_MODEL(sc, v, p) \
	((sc)->sc_model->sip_vendor == (v) && \
	 (sc)->sc_model->sip_product == (p))

/* True if sc is a SiS 900 at exactly revision rev. */
#define SIP_SIS900_REV(sc, rev) \
	SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))

/* Iteration limit for register polling loops. */
#define SIP_TIMEOUT 1000
555 | |
556 | static int sip_ifflags_cb(struct ethercom *); |
557 | static void sipcom_start(struct ifnet *); |
558 | static void sipcom_watchdog(struct ifnet *); |
559 | static int sipcom_ioctl(struct ifnet *, u_long, void *); |
560 | static int sipcom_init(struct ifnet *); |
561 | static void sipcom_stop(struct ifnet *, int); |
562 | |
563 | static bool sipcom_reset(struct sip_softc *); |
564 | static void sipcom_rxdrain(struct sip_softc *); |
565 | static int sipcom_add_rxbuf(struct sip_softc *, int); |
566 | static void sipcom_read_eeprom(struct sip_softc *, int, int, |
567 | u_int16_t *); |
568 | static void sipcom_tick(void *); |
569 | |
570 | static void sipcom_sis900_set_filter(struct sip_softc *); |
571 | static void sipcom_dp83815_set_filter(struct sip_softc *); |
572 | |
573 | static void sipcom_dp83820_read_macaddr(struct sip_softc *, |
574 | const struct pci_attach_args *, u_int8_t *); |
575 | static void sipcom_sis900_eeprom_delay(struct sip_softc *sc); |
576 | static void sipcom_sis900_read_macaddr(struct sip_softc *, |
577 | const struct pci_attach_args *, u_int8_t *); |
578 | static void sipcom_dp83815_read_macaddr(struct sip_softc *, |
579 | const struct pci_attach_args *, u_int8_t *); |
580 | |
581 | static int sipcom_intr(void *); |
582 | static void sipcom_txintr(struct sip_softc *); |
583 | static void sip_rxintr(struct sip_softc *); |
584 | static void gsip_rxintr(struct sip_softc *); |
585 | |
586 | static int sipcom_dp83820_mii_readreg(device_t, int, int); |
587 | static void sipcom_dp83820_mii_writereg(device_t, int, int, int); |
588 | static void sipcom_dp83820_mii_statchg(struct ifnet *); |
589 | |
590 | static int sipcom_sis900_mii_readreg(device_t, int, int); |
591 | static void sipcom_sis900_mii_writereg(device_t, int, int, int); |
592 | static void sipcom_sis900_mii_statchg(struct ifnet *); |
593 | |
594 | static int sipcom_dp83815_mii_readreg(device_t, int, int); |
595 | static void sipcom_dp83815_mii_writereg(device_t, int, int, int); |
596 | static void sipcom_dp83815_mii_statchg(struct ifnet *); |
597 | |
598 | static void sipcom_mediastatus(struct ifnet *, struct ifmediareq *); |
599 | |
600 | static int sipcom_match(device_t, cfdata_t, void *); |
601 | static void sipcom_attach(device_t, device_t, void *); |
602 | static void sipcom_do_detach(device_t, enum sip_attach_stage); |
603 | static int sipcom_detach(device_t, int); |
604 | static bool sipcom_resume(device_t, const pmf_qual_t *); |
605 | static bool sipcom_suspend(device_t, const pmf_qual_t *); |
606 | |
607 | int gsip_copy_small = 0; |
608 | int sip_copy_small = 0; |
609 | |
/*
 * Two attachments share the same softc and entry points; which
 * product table entries apply is decided in sipcom_match() from the
 * config name ("gsip" vs. "sip").
 */
CFATTACH_DECL3_NEW(gsip, sizeof(struct sip_softc),
    sipcom_match, sipcom_attach, sipcom_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
CFATTACH_DECL3_NEW(sip, sizeof(struct sip_softc),
    sipcom_match, sipcom_attach, sipcom_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
616 | |
617 | /* |
618 | * Descriptions of the variants of the SiS900. |
619 | */ |
struct sip_variant {
	/* MII register access and link-status-change handling. */
	int (*sipv_mii_readreg)(device_t, int, int);
	void (*sipv_mii_writereg)(device_t, int, int, int);
	void (*sipv_mii_statchg)(struct ifnet *);
	/* Program the chip's receive filter. */
	void (*sipv_set_filter)(struct sip_softc *);
	/* Read the station MAC address into the supplied buffer. */
	void (*sipv_read_macaddr)(struct sip_softc *,
	    const struct pci_attach_args *, u_int8_t *);
};
628 | |
629 | static u_int32_t sipcom_mii_bitbang_read(device_t); |
630 | static void sipcom_mii_bitbang_write(device_t, u_int32_t); |
631 | |
/* MII bit-bang glue; MDIO/MDC/direction bits live in EROMAR. */
static const struct mii_bitbang_ops sipcom_mii_bitbang_ops = {
	sipcom_mii_bitbang_read,
	sipcom_mii_bitbang_write,
	{
		EROMAR_MDIO, /* MII_BIT_MDO */
		EROMAR_MDIO, /* MII_BIT_MDI */
		EROMAR_MDC, /* MII_BIT_MDC */
		EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
		0, /* MII_BIT_DIR_PHY_HOST */
	}
};
643 | |
/* DP83820: own MII ops; shares the DP83815's Rx filter routine. */
static const struct sip_variant sipcom_variant_dp83820 = {
	sipcom_dp83820_mii_readreg,
	sipcom_dp83820_mii_writereg,
	sipcom_dp83820_mii_statchg,
	sipcom_dp83815_set_filter,
	sipcom_dp83820_read_macaddr,
};

/* SiS 900 / SiS 7016. */
static const struct sip_variant sipcom_variant_sis900 = {
	sipcom_sis900_mii_readreg,
	sipcom_sis900_mii_writereg,
	sipcom_sis900_mii_statchg,
	sipcom_sis900_set_filter,
	sipcom_sis900_read_macaddr,
};

/* DP83815. */
static const struct sip_variant sipcom_variant_dp83815 = {
	sipcom_dp83815_mii_readreg,
	sipcom_dp83815_mii_writereg,
	sipcom_dp83815_mii_statchg,
	sipcom_dp83815_set_filter,
	sipcom_dp83815_read_macaddr,
};
667 | |
668 | |
669 | /* |
670 | * Devices supported by this driver. |
671 | */ |
static const struct sip_product {
	pci_vendor_id_t sip_vendor;	/* PCI vendor ID */
	pci_product_id_t sip_product;	/* PCI product ID */
	const char *sip_name;		/* device description string */
	const struct sip_variant *sip_variant; /* chip-specific ops */
	int sip_gigabit;		/* 1: matched by "gsip" attachment */
} sipcom_products[] = {
	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
	  "NatSemi DP83820 Gigabit Ethernet",
	  &sipcom_variant_dp83820, 1 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
	  "SiS 900 10/100 Ethernet",
	  &sipcom_variant_sis900, 0 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
	  "SiS 7016 10/100 Ethernet",
	  &sipcom_variant_sis900, 0 },

	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
	  "NatSemi DP83815 10/100 Ethernet",
	  &sipcom_variant_dp83815, 0 },

	/* Sentinel: sip_name == NULL terminates the table. */
	{ 0, 0,
	  NULL,
	  NULL, 0 },
};
697 | |
698 | static const struct sip_product * |
699 | sipcom_lookup(const struct pci_attach_args *pa, bool gigabit) |
700 | { |
701 | const struct sip_product *sip; |
702 | |
703 | for (sip = sipcom_products; sip->sip_name != NULL; sip++) { |
704 | if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor && |
705 | PCI_PRODUCT(pa->pa_id) == sip->sip_product && |
706 | sip->sip_gigabit == gigabit) |
707 | return sip; |
708 | } |
709 | return NULL; |
710 | } |
711 | |
712 | /* |
713 | * I really hate stupid hardware vendors. There's a bit in the EEPROM |
714 | * which indicates if the card can do 64-bit data transfers. Unfortunately, |
715 | * several vendors of 32-bit cards fail to clear this bit in the EEPROM, |
716 | * which means we try to use 64-bit data transfers on those cards if we |
717 | * happen to be plugged into a 32-bit slot. |
718 | * |
719 | * What we do is use this table of cards known to be 64-bit cards. If |
720 | * you have a 64-bit card who's subsystem ID is not listed in this table, |
721 | * send the output of "pcictl dump ..." of the device to me so that your |
722 | * card will use the 64-bit data path when plugged into a 64-bit slot. |
723 | * |
724 | * -- Jason R. Thorpe <thorpej@NetBSD.org> |
725 | * June 30, 2002 |
726 | */ |
727 | static int |
728 | sipcom_check_64bit(const struct pci_attach_args *pa) |
729 | { |
730 | static const struct { |
731 | pci_vendor_id_t c64_vendor; |
732 | pci_product_id_t c64_product; |
733 | } card64[] = { |
734 | /* Asante GigaNIX */ |
735 | { 0x128a, 0x0002 }, |
736 | |
737 | /* Accton EN1407-T, Planex GN-1000TE */ |
738 | { 0x1113, 0x1407 }, |
739 | |
740 | /* Netgear GA621 */ |
741 | { 0x1385, 0x621a }, |
742 | |
743 | /* Netgear GA622 */ |
744 | { 0x1385, 0x622a }, |
745 | |
746 | /* SMC EZ Card 1000 (9462TX) */ |
747 | { 0x10b8, 0x9462 }, |
748 | |
749 | { 0, 0} |
750 | }; |
751 | pcireg_t subsys; |
752 | int i; |
753 | |
754 | subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); |
755 | |
756 | for (i = 0; card64[i].c64_vendor != 0; i++) { |
757 | if (PCI_VENDOR(subsys) == card64[i].c64_vendor && |
758 | PCI_PRODUCT(subsys) == card64[i].c64_product) |
759 | return (1); |
760 | } |
761 | |
762 | return (0); |
763 | } |
764 | |
765 | static int |
766 | sipcom_match(device_t parent, cfdata_t cf, void *aux) |
767 | { |
768 | struct pci_attach_args *pa = aux; |
769 | |
770 | if (sipcom_lookup(pa, strcmp(cf->cf_name, "gsip" ) == 0) != NULL) |
771 | return 1; |
772 | |
773 | return 0; |
774 | } |
775 | |
/*
 * sipcom_dp83820_attach:
 *
 *	DP83820-specific attach work: reload configuration from the
 *	EEPROM, latch the GPIO state, and accumulate the prototype CFG
 *	register bits (64-bit data path, clock source, TBI, MRM/MWI,
 *	extended descriptor format) into sc_cfg.
 */
static void
sipcom_dp83820_attach(struct sip_softc *sc, struct pci_attach_args *pa)
{
	u_int32_t reg;
	int i;

	/*
	 * Cause the chip to load configuration data from the EEPROM.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
	/* Poll up to 10000 * 10us = 100ms for the bit to self-clear. */
	for (i = 0; i < 10000; i++) {
		delay(10);
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
		    PTSCR_EELOAD_EN) == 0)
			break;
	}
	if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
	    PTSCR_EELOAD_EN) {
		printf("%s: timeout loading configuration from EEPROM\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/* Latch the EEPROM-loaded GPIO state as our prototype GPIOR. */
	sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);

	reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
	if (reg & CFG_PCI64_DET) {
		printf("%s: 64-bit PCI slot detected", device_xname(sc->sc_dev));
		/*
		 * Check to see if this card is 64-bit.  If so, enable 64-bit
		 * data transfers.
		 *
		 * We can't use the DATA64_EN bit in the EEPROM, because
		 * vendors of 32-bit cards fail to clear that bit in many
		 * cases (yet the card still detects that it's in a 64-bit
		 * slot; go figure).
		 */
		if (sipcom_check_64bit(pa)) {
			sc->sc_cfg |= CFG_DATA64_EN;
			printf(", using 64-bit data transfers");
		}
		printf("\n");
	}

	/*
	 * XXX Need some PCI flags indicating support for
	 * XXX 64-bit addressing.
	 */
#if 0
	if (reg & CFG_M64ADDR)
		sc->sc_cfg |= CFG_M64ADDR;
	if (reg & CFG_T64ADDR)
		sc->sc_cfg |= CFG_T64ADDR;
#endif

	/* Report and carry forward the clock / ten-bit interface bits. */
	if (reg & (CFG_TBI_EN|CFG_EXT_125)) {
		const char *sep = "";
		printf("%s: using ", device_xname(sc->sc_dev));
		if (reg & CFG_EXT_125) {
			sc->sc_cfg |= CFG_EXT_125;
			printf("%s125MHz clock", sep);
			sep = ", ";
		}
		if (reg & CFG_TBI_EN) {
			sc->sc_cfg |= CFG_TBI_EN;
			printf("%sten-bit interface", sep);
			sep = ", ";
		}
		printf("\n");
	}
	/* Disable MRM/MWI unless both the bus and the chip allow them. */
	if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
	    (reg & CFG_MRM_DIS) != 0)
		sc->sc_cfg |= CFG_MRM_DIS;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
	    (reg & CFG_MWI_DIS) != 0)
		sc->sc_cfg |= CFG_MWI_DIS;

	/*
	 * Use the extended descriptor format on the DP83820.  This
	 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
	 * checksumming.
	 */
	sc->sc_cfg |= CFG_EXTSTS_EN;
}
860 | |
861 | static int |
862 | sipcom_detach(device_t self, int flags) |
863 | { |
864 | int s; |
865 | |
866 | s = splnet(); |
867 | sipcom_do_detach(self, SIP_ATTACH_FIN); |
868 | splx(s); |
869 | |
870 | return 0; |
871 | } |
872 | |
/*
 * sipcom_do_detach:
 *
 *	Release resources acquired during attach.  `stage' names the
 *	last attach step that completed; the switch falls through from
 *	that stage downward so each teardown step runs in reverse
 *	order of acquisition.
 */
static void
sipcom_do_detach(device_t self, enum sip_attach_stage stage)
{
	int i;
	struct sip_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Free any resources we've allocated during attach.
	 * Do this in reverse order and fall through.
	 */
	switch (stage) {
	case SIP_ATTACH_FIN:
		/* Fully attached: stop the chip, then undo the attach. */
		sipcom_stop(ifp, 1);
		pmf_device_deregister(self);
#ifdef SIP_EVENT_COUNTERS
		/*
		 * Detach event counters.
		 */
		evcnt_detach(&sc->sc_ev_txforceintr);
		evcnt_detach(&sc->sc_ev_txdstall);
		evcnt_detach(&sc->sc_ev_txsstall);
		evcnt_detach(&sc->sc_ev_hiberr);
		evcnt_detach(&sc->sc_ev_rxintr);
		evcnt_detach(&sc->sc_ev_txiintr);
		evcnt_detach(&sc->sc_ev_txdintr);
		/* Mirrors the variant-specific counters set up in attach. */
		if (!sc->sc_gigabit) {
			evcnt_detach(&sc->sc_ev_rxpause);
		} else {
			evcnt_detach(&sc->sc_ev_txudpsum);
			evcnt_detach(&sc->sc_ev_txtcpsum);
			evcnt_detach(&sc->sc_ev_txipsum);
			evcnt_detach(&sc->sc_ev_rxudpsum);
			evcnt_detach(&sc->sc_ev_rxtcpsum);
			evcnt_detach(&sc->sc_ev_rxipsum);
			evcnt_detach(&sc->sc_ev_txpause);
			evcnt_detach(&sc->sc_ev_rxpause);
		}
#endif /* SIP_EVENT_COUNTERS */

		rnd_detach_source(&sc->rnd_source);

		ether_ifdetach(ifp);
		if_detach(ifp);
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_RXMAP:
		/* Only destroy maps that were actually created. */
		for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_rxsoft[i].rxs_dmamap);
		}
		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_TXMAP:
		for (i = 0; i < SIP_TXQUEUELEN; i++) {
			if (sc->sc_txsoft[i].txs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txsoft[i].txs_dmamap);
		}
		/*FALLTHROUGH*/
	case SIP_ATTACH_LOAD_MAP:
		/* Unload before destroying the control-data map. */
		bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_MAP:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case SIP_ATTACH_MAP_MEM:
		/* Unmap the kernel-virtual view of the control data. */
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
		    sizeof(struct sip_control_data));
		/*FALLTHROUGH*/
	case SIP_ATTACH_ALLOC_MEM:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_seg, 1);
		/* FALLTHROUGH*/
	case SIP_ATTACH_INTR:
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		/* FALLTHROUGH*/
	case SIP_ATTACH_MAP:
		/* Finally release the register window mapping. */
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
		break;
	default:
		break;
	}
	return;
}
958 | |
959 | static bool |
960 | sipcom_resume(device_t self, const pmf_qual_t *qual) |
961 | { |
962 | struct sip_softc *sc = device_private(self); |
963 | |
964 | return sipcom_reset(sc); |
965 | } |
966 | |
967 | static bool |
968 | sipcom_suspend(device_t self, const pmf_qual_t *qual) |
969 | { |
970 | struct sip_softc *sc = device_private(self); |
971 | |
972 | sipcom_rxdrain(sc); |
973 | return true; |
974 | } |
975 | |
976 | static void |
977 | sipcom_attach(device_t parent, device_t self, void *aux) |
978 | { |
979 | struct sip_softc *sc = device_private(self); |
980 | struct pci_attach_args *pa = aux; |
981 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
982 | pci_chipset_tag_t pc = pa->pa_pc; |
983 | pci_intr_handle_t ih; |
984 | const char *intrstr = NULL; |
985 | bus_space_tag_t iot, memt; |
986 | bus_space_handle_t ioh, memh; |
987 | bus_size_t iosz, memsz; |
988 | int ioh_valid, memh_valid; |
989 | int i, rseg, error; |
990 | const struct sip_product *sip; |
991 | u_int8_t enaddr[ETHER_ADDR_LEN]; |
992 | pcireg_t csr; |
993 | pcireg_t memtype; |
994 | bus_size_t tx_dmamap_size; |
995 | int ntxsegs_alloc; |
996 | cfdata_t cf = device_cfdata(self); |
997 | char intrbuf[PCI_INTRSTR_LEN]; |
998 | |
999 | callout_init(&sc->sc_tick_ch, 0); |
1000 | |
1001 | sip = sipcom_lookup(pa, strcmp(cf->cf_name, "gsip" ) == 0); |
1002 | if (sip == NULL) { |
1003 | aprint_error("\n" ); |
1004 | panic("%s: impossible" , __func__); |
1005 | } |
1006 | sc->sc_dev = self; |
1007 | sc->sc_gigabit = sip->sip_gigabit; |
1008 | pmf_self_suspensor_init(self, &sc->sc_suspensor, &sc->sc_qual); |
1009 | sc->sc_pc = pc; |
1010 | |
1011 | if (sc->sc_gigabit) { |
1012 | sc->sc_rxintr = gsip_rxintr; |
1013 | sc->sc_parm = &gsip_parm; |
1014 | } else { |
1015 | sc->sc_rxintr = sip_rxintr; |
1016 | sc->sc_parm = &sip_parm; |
1017 | } |
1018 | tx_dmamap_size = sc->sc_parm->p_tx_dmamap_size; |
1019 | ntxsegs_alloc = sc->sc_parm->p_ntxsegs_alloc; |
1020 | sc->sc_ntxdesc = SIP_TXQUEUELEN * ntxsegs_alloc; |
1021 | sc->sc_ntxdesc_mask = sc->sc_ntxdesc - 1; |
1022 | sc->sc_nrxdesc_mask = sc->sc_parm->p_nrxdesc - 1; |
1023 | |
1024 | sc->sc_rev = PCI_REVISION(pa->pa_class); |
1025 | |
1026 | aprint_naive("\n" ); |
1027 | aprint_normal(": %s, rev %#02x\n" , sip->sip_name, sc->sc_rev); |
1028 | |
1029 | sc->sc_model = sip; |
1030 | |
1031 | /* |
1032 | * XXX Work-around broken PXE firmware on some boards. |
1033 | * |
1034 | * The DP83815 shares an address decoder with the MEM BAR |
1035 | * and the ROM BAR. Make sure the ROM BAR is disabled, |
1036 | * so that memory mapped access works. |
1037 | */ |
1038 | pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, |
1039 | pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) & |
1040 | ~PCI_MAPREG_ROM_ENABLE); |
1041 | |
1042 | /* |
1043 | * Map the device. |
1044 | */ |
1045 | ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA, |
1046 | PCI_MAPREG_TYPE_IO, 0, |
1047 | &iot, &ioh, NULL, &iosz) == 0); |
1048 | if (sc->sc_gigabit) { |
1049 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA); |
1050 | switch (memtype) { |
1051 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
1052 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
1053 | memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA, |
1054 | memtype, 0, &memt, &memh, NULL, &memsz) == 0); |
1055 | break; |
1056 | default: |
1057 | memh_valid = 0; |
1058 | } |
1059 | } else { |
1060 | memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA, |
1061 | PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0, |
1062 | &memt, &memh, NULL, &memsz) == 0); |
1063 | } |
1064 | |
1065 | if (memh_valid) { |
1066 | sc->sc_st = memt; |
1067 | sc->sc_sh = memh; |
1068 | sc->sc_sz = memsz; |
1069 | } else if (ioh_valid) { |
1070 | sc->sc_st = iot; |
1071 | sc->sc_sh = ioh; |
1072 | sc->sc_sz = iosz; |
1073 | } else { |
1074 | aprint_error_dev(self, "unable to map device registers\n" ); |
1075 | return; |
1076 | } |
1077 | |
1078 | sc->sc_dmat = pa->pa_dmat; |
1079 | |
1080 | /* |
1081 | * Make sure bus mastering is enabled. Also make sure |
1082 | * Write/Invalidate is enabled if we're allowed to use it. |
1083 | */ |
1084 | csr = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
1085 | if (pa->pa_flags & PCI_FLAGS_MWI_OKAY) |
1086 | csr |= PCI_COMMAND_INVALIDATE_ENABLE; |
1087 | pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, |
1088 | csr | PCI_COMMAND_MASTER_ENABLE); |
1089 | |
1090 | /* power up chip */ |
1091 | error = pci_activate(pa->pa_pc, pa->pa_tag, self, pci_activate_null); |
1092 | if (error != 0 && error != EOPNOTSUPP) { |
1093 | aprint_error_dev(sc->sc_dev, "cannot activate %d\n" , error); |
1094 | return; |
1095 | } |
1096 | |
1097 | /* |
1098 | * Map and establish our interrupt. |
1099 | */ |
1100 | if (pci_intr_map(pa, &ih)) { |
1101 | aprint_error_dev(sc->sc_dev, "unable to map interrupt\n" ); |
1102 | return; |
1103 | } |
1104 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); |
1105 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sipcom_intr, sc); |
1106 | if (sc->sc_ih == NULL) { |
1107 | aprint_error_dev(sc->sc_dev, "unable to establish interrupt" ); |
1108 | if (intrstr != NULL) |
1109 | aprint_error(" at %s" , intrstr); |
1110 | aprint_error("\n" ); |
1111 | sipcom_do_detach(self, SIP_ATTACH_MAP); |
1112 | return; |
1113 | } |
1114 | aprint_normal_dev(sc->sc_dev, "interrupting at %s\n" , intrstr); |
1115 | |
1116 | SIMPLEQ_INIT(&sc->sc_txfreeq); |
1117 | SIMPLEQ_INIT(&sc->sc_txdirtyq); |
1118 | |
1119 | /* |
1120 | * Allocate the control data structures, and create and load the |
1121 | * DMA map for it. |
1122 | */ |
1123 | if ((error = bus_dmamem_alloc(sc->sc_dmat, |
1124 | sizeof(struct sip_control_data), PAGE_SIZE, 0, &sc->sc_seg, 1, |
1125 | &rseg, 0)) != 0) { |
1126 | aprint_error_dev(sc->sc_dev, |
1127 | "unable to allocate control data, error = %d\n" , error); |
1128 | sipcom_do_detach(self, SIP_ATTACH_INTR); |
1129 | return; |
1130 | } |
1131 | |
1132 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, rseg, |
1133 | sizeof(struct sip_control_data), (void **)&sc->sc_control_data, |
1134 | BUS_DMA_COHERENT)) != 0) { |
1135 | aprint_error_dev(sc->sc_dev, |
1136 | "unable to map control data, error = %d\n" , error); |
1137 | sipcom_do_detach(self, SIP_ATTACH_ALLOC_MEM); |
1138 | } |
1139 | |
1140 | if ((error = bus_dmamap_create(sc->sc_dmat, |
1141 | sizeof(struct sip_control_data), 1, |
1142 | sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { |
1143 | aprint_error_dev(self, "unable to create control data DMA map" |
1144 | ", error = %d\n" , error); |
1145 | sipcom_do_detach(self, SIP_ATTACH_MAP_MEM); |
1146 | } |
1147 | |
1148 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, |
1149 | sc->sc_control_data, sizeof(struct sip_control_data), NULL, |
1150 | 0)) != 0) { |
1151 | aprint_error_dev(self, "unable to load control data DMA map" |
1152 | ", error = %d\n" , error); |
1153 | sipcom_do_detach(self, SIP_ATTACH_CREATE_MAP); |
1154 | } |
1155 | |
1156 | /* |
1157 | * Create the transmit buffer DMA maps. |
1158 | */ |
1159 | for (i = 0; i < SIP_TXQUEUELEN; i++) { |
1160 | if ((error = bus_dmamap_create(sc->sc_dmat, tx_dmamap_size, |
1161 | sc->sc_parm->p_ntxsegs, MCLBYTES, 0, 0, |
1162 | &sc->sc_txsoft[i].txs_dmamap)) != 0) { |
1163 | aprint_error_dev(self, "unable to create tx DMA map %d" |
1164 | ", error = %d\n" , i, error); |
1165 | sipcom_do_detach(self, SIP_ATTACH_CREATE_TXMAP); |
1166 | } |
1167 | } |
1168 | |
1169 | /* |
1170 | * Create the receive buffer DMA maps. |
1171 | */ |
1172 | for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) { |
1173 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
1174 | MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { |
1175 | aprint_error_dev(self, "unable to create rx DMA map %d" |
1176 | ", error = %d\n" , i, error); |
1177 | sipcom_do_detach(self, SIP_ATTACH_CREATE_RXMAP); |
1178 | } |
1179 | sc->sc_rxsoft[i].rxs_mbuf = NULL; |
1180 | } |
1181 | |
1182 | /* |
1183 | * Reset the chip to a known state. |
1184 | */ |
1185 | sipcom_reset(sc); |
1186 | |
1187 | /* |
1188 | * Read the Ethernet address from the EEPROM. This might |
1189 | * also fetch other stuff from the EEPROM and stash it |
1190 | * in the softc. |
1191 | */ |
1192 | sc->sc_cfg = 0; |
1193 | if (!sc->sc_gigabit) { |
1194 | if (SIP_SIS900_REV(sc,SIS_REV_635) || |
1195 | SIP_SIS900_REV(sc,SIS_REV_900B)) |
1196 | sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT); |
1197 | |
1198 | if (SIP_SIS900_REV(sc,SIS_REV_635) || |
1199 | SIP_SIS900_REV(sc,SIS_REV_960) || |
1200 | SIP_SIS900_REV(sc,SIS_REV_900B)) |
1201 | sc->sc_cfg |= |
1202 | (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & |
1203 | CFG_EDBMASTEN); |
1204 | } |
1205 | |
1206 | (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr); |
1207 | |
1208 | aprint_normal_dev(self, "Ethernet address %s\n" ,ether_sprintf(enaddr)); |
1209 | |
1210 | /* |
1211 | * Initialize the configuration register: aggressive PCI |
1212 | * bus request algorithm, default backoff, default OW timer, |
1213 | * default parity error detection. |
1214 | * |
1215 | * NOTE: "Big endian mode" is useless on the SiS900 and |
1216 | * friends -- it affects packet data, not descriptors. |
1217 | */ |
1218 | if (sc->sc_gigabit) |
1219 | sipcom_dp83820_attach(sc, pa); |
1220 | |
1221 | /* |
1222 | * Initialize our media structures and probe the MII. |
1223 | */ |
1224 | sc->sc_mii.mii_ifp = ifp; |
1225 | sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg; |
1226 | sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg; |
1227 | sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg; |
1228 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
1229 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, |
1230 | sipcom_mediastatus); |
1231 | |
1232 | /* |
1233 | * XXX We cannot handle flow control on the DP83815. |
1234 | */ |
1235 | if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) |
1236 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
1237 | MII_OFFSET_ANY, 0); |
1238 | else |
1239 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
1240 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
1241 | if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { |
1242 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); |
1243 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); |
1244 | } else |
1245 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
1246 | |
1247 | ifp = &sc->sc_ethercom.ec_if; |
1248 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
1249 | ifp->if_softc = sc; |
1250 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
1251 | sc->sc_if_flags = ifp->if_flags; |
1252 | ifp->if_ioctl = sipcom_ioctl; |
1253 | ifp->if_start = sipcom_start; |
1254 | ifp->if_watchdog = sipcom_watchdog; |
1255 | ifp->if_init = sipcom_init; |
1256 | ifp->if_stop = sipcom_stop; |
1257 | IFQ_SET_READY(&ifp->if_snd); |
1258 | |
1259 | /* |
1260 | * We can support 802.1Q VLAN-sized frames. |
1261 | */ |
1262 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
1263 | |
1264 | if (sc->sc_gigabit) { |
1265 | /* |
1266 | * And the DP83820 can do VLAN tagging in hardware, and |
1267 | * support the jumbo Ethernet MTU. |
1268 | */ |
1269 | sc->sc_ethercom.ec_capabilities |= |
1270 | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU; |
1271 | |
1272 | /* |
1273 | * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums |
1274 | * in hardware. |
1275 | */ |
1276 | ifp->if_capabilities |= |
1277 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
1278 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
1279 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
1280 | } |
1281 | |
1282 | /* |
1283 | * Attach the interface. |
1284 | */ |
1285 | if_attach(ifp); |
1286 | ether_ifattach(ifp, enaddr); |
1287 | ether_set_ifflags_cb(&sc->sc_ethercom, sip_ifflags_cb); |
1288 | sc->sc_prev.ec_capenable = sc->sc_ethercom.ec_capenable; |
1289 | sc->sc_prev.is_vlan = VLAN_ATTACHED(&(sc)->sc_ethercom); |
1290 | sc->sc_prev.if_capenable = ifp->if_capenable; |
1291 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), |
1292 | RND_TYPE_NET, RND_FLAG_DEFAULT); |
1293 | |
1294 | /* |
1295 | * The number of bytes that must be available in |
1296 | * the Tx FIFO before the bus master can DMA more |
1297 | * data into the FIFO. |
1298 | */ |
1299 | sc->sc_tx_fill_thresh = 64 / 32; |
1300 | |
1301 | /* |
1302 | * Start at a drain threshold of 512 bytes. We will |
1303 | * increase it if a DMA underrun occurs. |
1304 | * |
1305 | * XXX The minimum value of this variable should be |
1306 | * tuned. We may be able to improve performance |
1307 | * by starting with a lower value. That, however, |
1308 | * may trash the first few outgoing packets if the |
1309 | * PCI bus is saturated. |
1310 | */ |
1311 | if (sc->sc_gigabit) |
1312 | sc->sc_tx_drain_thresh = 6400 / 32; /* from FreeBSD nge(4) */ |
1313 | else |
1314 | sc->sc_tx_drain_thresh = 1504 / 32; |
1315 | |
1316 | /* |
1317 | * Initialize the Rx FIFO drain threshold. |
1318 | * |
1319 | * This is in units of 8 bytes. |
1320 | * |
1321 | * We should never set this value lower than 2; 14 bytes are |
1322 | * required to filter the packet. |
1323 | */ |
1324 | sc->sc_rx_drain_thresh = 128 / 8; |
1325 | |
1326 | #ifdef SIP_EVENT_COUNTERS |
1327 | /* |
1328 | * Attach event counters. |
1329 | */ |
1330 | evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC, |
1331 | NULL, device_xname(sc->sc_dev), "txsstall" ); |
1332 | evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC, |
1333 | NULL, device_xname(sc->sc_dev), "txdstall" ); |
1334 | evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR, |
1335 | NULL, device_xname(sc->sc_dev), "txforceintr" ); |
1336 | evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR, |
1337 | NULL, device_xname(sc->sc_dev), "txdintr" ); |
1338 | evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR, |
1339 | NULL, device_xname(sc->sc_dev), "txiintr" ); |
1340 | evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, |
1341 | NULL, device_xname(sc->sc_dev), "rxintr" ); |
1342 | evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR, |
1343 | NULL, device_xname(sc->sc_dev), "hiberr" ); |
1344 | if (!sc->sc_gigabit) { |
1345 | evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_INTR, |
1346 | NULL, device_xname(sc->sc_dev), "rxpause" ); |
1347 | } else { |
1348 | evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC, |
1349 | NULL, device_xname(sc->sc_dev), "rxpause" ); |
1350 | evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC, |
1351 | NULL, device_xname(sc->sc_dev), "txpause" ); |
1352 | evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC, |
1353 | NULL, device_xname(sc->sc_dev), "rxipsum" ); |
1354 | evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC, |
1355 | NULL, device_xname(sc->sc_dev), "rxtcpsum" ); |
1356 | evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC, |
1357 | NULL, device_xname(sc->sc_dev), "rxudpsum" ); |
1358 | evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC, |
1359 | NULL, device_xname(sc->sc_dev), "txipsum" ); |
1360 | evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC, |
1361 | NULL, device_xname(sc->sc_dev), "txtcpsum" ); |
1362 | evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC, |
1363 | NULL, device_xname(sc->sc_dev), "txudpsum" ); |
1364 | } |
1365 | #endif /* SIP_EVENT_COUNTERS */ |
1366 | |
1367 | if (pmf_device_register(self, sipcom_suspend, sipcom_resume)) |
1368 | pmf_class_network_register(self, ifp); |
1369 | else |
1370 | aprint_error_dev(self, "couldn't establish power handler\n" ); |
1371 | } |
1372 | |
/*
 * sipcom_set_extsts:
 *
 *	Fill in the DP83820 extended-status (EXTSTS) descriptor
 *	fields for an outgoing packet: the 802.1Q VLAN tag goes on
 *	the LAST descriptor (lasttx), the IPv4/TCPv4/UDPv4 hardware
 *	checksum requests go on the FIRST descriptor (sc_txnext).
 *
 *	NOTE(review): the `capenable' argument is unused; the
 *	DEBUG-only assertions read ifp->if_capenable directly.
 */
static inline void
sipcom_set_extsts(struct sip_softc *sc, int lasttx, struct mbuf *m0,
    uint64_t capenable)
{
	struct m_tag *mtag;
	u_int32_t extsts;	/* accumulated in little-endian form */
#ifdef DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	/*
	 * If VLANs are enabled and the packet has a VLAN tag, set
	 * up the descriptor to encapsulate the packet for us.
	 *
	 * This apparently has to be on the last descriptor of
	 * the packet.
	 */

	/*
	 * Byte swapping is tricky.  We need to provide the tag
	 * in a network byte order.  On a big-endian machine,
	 * the byteorder is correct, but we need to swap it
	 * anyway, because this will be undone by the outside
	 * htole32().  That's why there must be an
	 * unconditional swap instead of htons() inside.
	 */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
		sc->sc_txdescs[lasttx].sipd_extsts |=
		    htole32(EXTSTS_VPKT |
				(bswap16(VLAN_TAG_VALUE(mtag)) &
				 EXTSTS_VTCI));
	}

	/*
	 * If the upper-layer has requested IPv4/TCPv4/UDPv4
	 * checksumming, set up the descriptor to do this work
	 * for us.
	 *
	 * This apparently has to be on the first descriptor of
	 * the packet.
	 *
	 * Byte-swap constants so the compiler can optimize.
	 */
	extsts = 0;
	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
		extsts |= htole32(EXTSTS_IPPKT);
	}
	/* TCP and UDP checksumming are mutually exclusive per packet. */
	if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
		extsts |= htole32(EXTSTS_TCPPKT);
	} else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
		extsts |= htole32(EXTSTS_UDPPKT);
	}
	/* Checksum flags are applied to the packet's first descriptor. */
	sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
}
1432 | |
1433 | /* |
1434 | * sip_start: [ifnet interface function] |
1435 | * |
1436 | * Start packet transmission on the interface. |
1437 | */ |
1438 | static void |
1439 | sipcom_start(struct ifnet *ifp) |
1440 | { |
1441 | struct sip_softc *sc = ifp->if_softc; |
1442 | struct mbuf *m0; |
1443 | struct mbuf *m; |
1444 | struct sip_txsoft *txs; |
1445 | bus_dmamap_t dmamap; |
1446 | int error, nexttx, lasttx, seg; |
1447 | int ofree = sc->sc_txfree; |
1448 | #if 0 |
1449 | int firsttx = sc->sc_txnext; |
1450 | #endif |
1451 | |
1452 | /* |
1453 | * If we've been told to pause, don't transmit any more packets. |
1454 | */ |
1455 | if (!sc->sc_gigabit && sc->sc_paused) |
1456 | ifp->if_flags |= IFF_OACTIVE; |
1457 | |
1458 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) |
1459 | return; |
1460 | |
1461 | /* |
1462 | * Loop through the send queue, setting up transmit descriptors |
1463 | * until we drain the queue, or use up all available transmit |
1464 | * descriptors. |
1465 | */ |
1466 | for (;;) { |
1467 | /* Get a work queue entry. */ |
1468 | if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) { |
1469 | SIP_EVCNT_INCR(&sc->sc_ev_txsstall); |
1470 | break; |
1471 | } |
1472 | |
1473 | /* |
1474 | * Grab a packet off the queue. |
1475 | */ |
1476 | IFQ_POLL(&ifp->if_snd, m0); |
1477 | if (m0 == NULL) |
1478 | break; |
1479 | m = NULL; |
1480 | |
1481 | dmamap = txs->txs_dmamap; |
1482 | |
1483 | /* |
1484 | * Load the DMA map. If this fails, the packet either |
1485 | * didn't fit in the alloted number of segments, or we |
1486 | * were short on resources. |
1487 | */ |
1488 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
1489 | BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
1490 | /* In the non-gigabit case, we'll copy and try again. */ |
1491 | if (error != 0 && !sc->sc_gigabit) { |
1492 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1493 | if (m == NULL) { |
1494 | printf("%s: unable to allocate Tx mbuf\n" , |
1495 | device_xname(sc->sc_dev)); |
1496 | break; |
1497 | } |
1498 | MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); |
1499 | if (m0->m_pkthdr.len > MHLEN) { |
1500 | MCLGET(m, M_DONTWAIT); |
1501 | if ((m->m_flags & M_EXT) == 0) { |
1502 | printf("%s: unable to allocate Tx " |
1503 | "cluster\n" , |
1504 | device_xname(sc->sc_dev)); |
1505 | m_freem(m); |
1506 | break; |
1507 | } |
1508 | } |
1509 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); |
1510 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; |
1511 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, |
1512 | m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
1513 | if (error) { |
1514 | printf("%s: unable to load Tx buffer, error = " |
1515 | "%d\n" , device_xname(sc->sc_dev), error); |
1516 | break; |
1517 | } |
1518 | } else if (error == EFBIG) { |
1519 | /* |
1520 | * For the too-many-segments case, we simply |
1521 | * report an error and drop the packet, |
1522 | * since we can't sanely copy a jumbo packet |
1523 | * to a single buffer. |
1524 | */ |
1525 | printf("%s: Tx packet consumes too many DMA segments, " |
1526 | "dropping...\n" , device_xname(sc->sc_dev)); |
1527 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
1528 | m_freem(m0); |
1529 | continue; |
1530 | } else if (error != 0) { |
1531 | /* |
1532 | * Short on resources, just stop for now. |
1533 | */ |
1534 | break; |
1535 | } |
1536 | |
1537 | /* |
1538 | * Ensure we have enough descriptors free to describe |
1539 | * the packet. Note, we always reserve one descriptor |
1540 | * at the end of the ring as a termination point, to |
1541 | * prevent wrap-around. |
1542 | */ |
1543 | if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) { |
1544 | /* |
1545 | * Not enough free descriptors to transmit this |
1546 | * packet. We haven't committed anything yet, |
1547 | * so just unload the DMA map, put the packet |
1548 | * back on the queue, and punt. Notify the upper |
1549 | * layer that there are not more slots left. |
1550 | * |
1551 | * XXX We could allocate an mbuf and copy, but |
1552 | * XXX is it worth it? |
1553 | */ |
1554 | ifp->if_flags |= IFF_OACTIVE; |
1555 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
1556 | if (m != NULL) |
1557 | m_freem(m); |
1558 | SIP_EVCNT_INCR(&sc->sc_ev_txdstall); |
1559 | break; |
1560 | } |
1561 | |
1562 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
1563 | if (m != NULL) { |
1564 | m_freem(m0); |
1565 | m0 = m; |
1566 | } |
1567 | |
1568 | /* |
1569 | * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. |
1570 | */ |
1571 | |
1572 | /* Sync the DMA map. */ |
1573 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
1574 | BUS_DMASYNC_PREWRITE); |
1575 | |
1576 | /* |
1577 | * Initialize the transmit descriptors. |
1578 | */ |
1579 | for (nexttx = lasttx = sc->sc_txnext, seg = 0; |
1580 | seg < dmamap->dm_nsegs; |
1581 | seg++, nexttx = sip_nexttx(sc, nexttx)) { |
1582 | /* |
1583 | * If this is the first descriptor we're |
1584 | * enqueueing, don't set the OWN bit just |
1585 | * yet. That could cause a race condition. |
1586 | * We'll do it below. |
1587 | */ |
1588 | *sipd_bufptr(sc, &sc->sc_txdescs[nexttx]) = |
1589 | htole32(dmamap->dm_segs[seg].ds_addr); |
1590 | *sipd_cmdsts(sc, &sc->sc_txdescs[nexttx]) = |
1591 | htole32((nexttx == sc->sc_txnext ? 0 : CMDSTS_OWN) | |
1592 | CMDSTS_MORE | dmamap->dm_segs[seg].ds_len); |
1593 | sc->sc_txdescs[nexttx].sipd_extsts = 0; |
1594 | lasttx = nexttx; |
1595 | } |
1596 | |
1597 | /* Clear the MORE bit on the last segment. */ |
1598 | *sipd_cmdsts(sc, &sc->sc_txdescs[lasttx]) &= |
1599 | htole32(~CMDSTS_MORE); |
1600 | |
1601 | /* |
1602 | * If we're in the interrupt delay window, delay the |
1603 | * interrupt. |
1604 | */ |
1605 | if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) { |
1606 | SIP_EVCNT_INCR(&sc->sc_ev_txforceintr); |
1607 | *sipd_cmdsts(sc, &sc->sc_txdescs[lasttx]) |= |
1608 | htole32(CMDSTS_INTR); |
1609 | sc->sc_txwin = 0; |
1610 | } |
1611 | |
1612 | if (sc->sc_gigabit) |
1613 | sipcom_set_extsts(sc, lasttx, m0, ifp->if_capenable); |
1614 | |
1615 | /* Sync the descriptors we're using. */ |
1616 | sip_cdtxsync(sc, sc->sc_txnext, dmamap->dm_nsegs, |
1617 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1618 | |
1619 | /* |
1620 | * The entire packet is set up. Give the first descrptor |
1621 | * to the chip now. |
1622 | */ |
1623 | *sipd_cmdsts(sc, &sc->sc_txdescs[sc->sc_txnext]) |= |
1624 | htole32(CMDSTS_OWN); |
1625 | sip_cdtxsync(sc, sc->sc_txnext, 1, |
1626 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1627 | |
1628 | /* |
1629 | * Store a pointer to the packet so we can free it later, |
1630 | * and remember what txdirty will be once the packet is |
1631 | * done. |
1632 | */ |
1633 | txs->txs_mbuf = m0; |
1634 | txs->txs_firstdesc = sc->sc_txnext; |
1635 | txs->txs_lastdesc = lasttx; |
1636 | |
1637 | /* Advance the tx pointer. */ |
1638 | sc->sc_txfree -= dmamap->dm_nsegs; |
1639 | sc->sc_txnext = nexttx; |
1640 | |
1641 | SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); |
1642 | SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); |
1643 | |
1644 | /* |
1645 | * Pass the packet to any BPF listeners. |
1646 | */ |
1647 | bpf_mtap(ifp, m0); |
1648 | } |
1649 | |
1650 | if (txs == NULL || sc->sc_txfree == 0) { |
1651 | /* No more slots left; notify upper layer. */ |
1652 | ifp->if_flags |= IFF_OACTIVE; |
1653 | } |
1654 | |
1655 | if (sc->sc_txfree != ofree) { |
1656 | /* |
1657 | * Start the transmit process. Note, the manual says |
1658 | * that if there are no pending transmissions in the |
1659 | * chip's internal queue (indicated by TXE being clear), |
1660 | * then the driver software must set the TXDP to the |
1661 | * first descriptor to be transmitted. However, if we |
1662 | * do this, it causes serious performance degredation on |
1663 | * the DP83820 under load, not setting TXDP doesn't seem |
1664 | * to adversely affect the SiS 900 or DP83815. |
1665 | * |
1666 | * Well, I guess it wouldn't be the first time a manual |
1667 | * has lied -- and they could be speaking of the NULL- |
1668 | * terminated descriptor list case, rather than OWN- |
1669 | * terminated rings. |
1670 | */ |
1671 | #if 0 |
1672 | if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) & |
1673 | CR_TXE) == 0) { |
1674 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP, |
1675 | SIP_CDTXADDR(sc, firsttx)); |
1676 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE); |
1677 | } |
1678 | #else |
1679 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE); |
1680 | #endif |
1681 | |
1682 | /* Set a watchdog timer in case the chip flakes out. */ |
1683 | /* Gigabit autonegotiation takes 5 seconds. */ |
1684 | ifp->if_timer = (sc->sc_gigabit) ? 10 : 5; |
1685 | } |
1686 | } |
1687 | |
1688 | /* |
1689 | * sip_watchdog: [ifnet interface function] |
1690 | * |
1691 | * Watchdog timer handler. |
1692 | */ |
1693 | static void |
1694 | sipcom_watchdog(struct ifnet *ifp) |
1695 | { |
1696 | struct sip_softc *sc = ifp->if_softc; |
1697 | |
1698 | /* |
1699 | * The chip seems to ignore the CMDSTS_INTR bit sometimes! |
1700 | * If we get a timeout, try and sweep up transmit descriptors. |
1701 | * If we manage to sweep them all up, ignore the lack of |
1702 | * interrupt. |
1703 | */ |
1704 | sipcom_txintr(sc); |
1705 | |
1706 | if (sc->sc_txfree != sc->sc_ntxdesc) { |
1707 | printf("%s: device timeout\n" , device_xname(sc->sc_dev)); |
1708 | ifp->if_oerrors++; |
1709 | |
1710 | /* Reset the interface. */ |
1711 | (void) sipcom_init(ifp); |
1712 | } else if (ifp->if_flags & IFF_DEBUG) |
1713 | printf("%s: recovered from device timeout\n" , |
1714 | device_xname(sc->sc_dev)); |
1715 | |
1716 | /* Try to get more packets going. */ |
1717 | sipcom_start(ifp); |
1718 | } |
1719 | |
/* If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
sip_ifflags_cb(struct ethercom *ec)
{
/* True when ethercom capabilities and VLAN attachment are unchanged
 * since the snapshot taken at attach/init time. */
#define COMPARE_EC(sc) (((sc)->sc_prev.ec_capenable \
     == (sc)->sc_ethercom.ec_capenable) \
    && ((sc)->sc_prev.is_vlan == \
	VLAN_ATTACHED(&(sc)->sc_ethercom) ))
/* True when interface capabilities are unchanged since the snapshot. */
#define COMPARE_IC(sc, ifp) ((sc)->sc_prev.if_capenable == (ifp)->if_capenable)
	struct ifnet *ifp = &ec->ec_if;
	struct sip_softc *sc = ifp->if_softc;
	/* Bits that differ between the current and previous if_flags. */
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/* Anything beyond promiscuous/debug changes requires a full reset. */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0 || !COMPARE_EC(sc) ||
	    !COMPARE_IC(sc, ifp))
		return ENETRESET;
	/* Set up the receive filter. */
	(*sc->sc_model->sip_variant->sipv_set_filter)(sc);
	return 0;
}
1743 | |
1744 | /* |
1745 | * sip_ioctl: [ifnet interface function] |
1746 | * |
1747 | * Handle control requests from the operator. |
1748 | */ |
1749 | static int |
1750 | sipcom_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
1751 | { |
1752 | struct sip_softc *sc = ifp->if_softc; |
1753 | struct ifreq *ifr = (struct ifreq *)data; |
1754 | int s, error; |
1755 | |
1756 | s = splnet(); |
1757 | |
1758 | switch (cmd) { |
1759 | case SIOCSIFMEDIA: |
1760 | /* Flow control requires full-duplex mode. */ |
1761 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || |
1762 | (ifr->ifr_media & IFM_FDX) == 0) |
1763 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
1764 | |
1765 | /* XXX */ |
1766 | if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) |
1767 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
1768 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { |
1769 | if (sc->sc_gigabit && |
1770 | (ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { |
1771 | /* We can do both TXPAUSE and RXPAUSE. */ |
1772 | ifr->ifr_media |= |
1773 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
1774 | } else if (ifr->ifr_media & IFM_FLOW) { |
1775 | /* |
1776 | * Both TXPAUSE and RXPAUSE must be set. |
1777 | * (SiS900 and DP83815 don't have PAUSE_ASYM |
1778 | * feature.) |
1779 | * |
1780 | * XXX Can SiS900 and DP83815 send PAUSE? |
1781 | */ |
1782 | ifr->ifr_media |= |
1783 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
1784 | } |
1785 | sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; |
1786 | } |
1787 | /*FALLTHROUGH*/ |
1788 | default: |
1789 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) |
1790 | break; |
1791 | |
1792 | error = 0; |
1793 | |
1794 | if (cmd == SIOCSIFCAP) |
1795 | error = (*ifp->if_init)(ifp); |
1796 | else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
1797 | ; |
1798 | else if (ifp->if_flags & IFF_RUNNING) { |
1799 | /* |
1800 | * Multicast list has changed; set the hardware filter |
1801 | * accordingly. |
1802 | */ |
1803 | (*sc->sc_model->sip_variant->sipv_set_filter)(sc); |
1804 | } |
1805 | break; |
1806 | } |
1807 | |
1808 | /* Try to get more packets going. */ |
1809 | sipcom_start(ifp); |
1810 | |
1811 | sc->sc_if_flags = ifp->if_flags; |
1812 | splx(s); |
1813 | return (error); |
1814 | } |
1815 | |
1816 | /* |
1817 | * sip_intr: |
1818 | * |
1819 | * Interrupt service routine. |
1820 | */ |
1821 | static int |
1822 | sipcom_intr(void *arg) |
1823 | { |
1824 | struct sip_softc *sc = arg; |
1825 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1826 | u_int32_t isr; |
1827 | int handled = 0; |
1828 | |
1829 | if (!device_activation(sc->sc_dev, DEVACT_LEVEL_DRIVER)) |
1830 | return 0; |
1831 | |
1832 | /* Disable interrupts. */ |
1833 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, 0); |
1834 | |
1835 | for (;;) { |
1836 | /* Reading clears interrupt. */ |
1837 | isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR); |
1838 | if ((isr & sc->sc_imr) == 0) |
1839 | break; |
1840 | |
1841 | rnd_add_uint32(&sc->rnd_source, isr); |
1842 | |
1843 | handled = 1; |
1844 | |
1845 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
1846 | break; |
1847 | |
1848 | if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) { |
1849 | SIP_EVCNT_INCR(&sc->sc_ev_rxintr); |
1850 | |
1851 | /* Grab any new packets. */ |
1852 | (*sc->sc_rxintr)(sc); |
1853 | |
1854 | if (isr & ISR_RXORN) { |
1855 | printf("%s: receive FIFO overrun\n" , |
1856 | device_xname(sc->sc_dev)); |
1857 | |
1858 | /* XXX adjust rx_drain_thresh? */ |
1859 | } |
1860 | |
1861 | if (isr & ISR_RXIDLE) { |
1862 | printf("%s: receive ring overrun\n" , |
1863 | device_xname(sc->sc_dev)); |
1864 | |
1865 | /* Get the receive process going again. */ |
1866 | bus_space_write_4(sc->sc_st, sc->sc_sh, |
1867 | SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr)); |
1868 | bus_space_write_4(sc->sc_st, sc->sc_sh, |
1869 | SIP_CR, CR_RXE); |
1870 | } |
1871 | } |
1872 | |
1873 | if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) { |
1874 | #ifdef SIP_EVENT_COUNTERS |
1875 | if (isr & ISR_TXDESC) |
1876 | SIP_EVCNT_INCR(&sc->sc_ev_txdintr); |
1877 | else if (isr & ISR_TXIDLE) |
1878 | SIP_EVCNT_INCR(&sc->sc_ev_txiintr); |
1879 | #endif |
1880 | |
1881 | /* Sweep up transmit descriptors. */ |
1882 | sipcom_txintr(sc); |
1883 | |
1884 | if (isr & ISR_TXURN) { |
1885 | u_int32_t thresh; |
1886 | int txfifo_size = (sc->sc_gigabit) |
1887 | ? DP83820_SIP_TXFIFO_SIZE |
1888 | : OTHER_SIP_TXFIFO_SIZE; |
1889 | |
1890 | printf("%s: transmit FIFO underrun" , |
1891 | device_xname(sc->sc_dev)); |
1892 | thresh = sc->sc_tx_drain_thresh + 1; |
1893 | if (thresh <= __SHIFTOUT_MASK(sc->sc_bits.b_txcfg_drth_mask) |
1894 | && (thresh * 32) <= (txfifo_size - |
1895 | (sc->sc_tx_fill_thresh * 32))) { |
1896 | printf("; increasing Tx drain " |
1897 | "threshold to %u bytes\n" , |
1898 | thresh * 32); |
1899 | sc->sc_tx_drain_thresh = thresh; |
1900 | (void) sipcom_init(ifp); |
1901 | } else { |
1902 | (void) sipcom_init(ifp); |
1903 | printf("\n" ); |
1904 | } |
1905 | } |
1906 | } |
1907 | |
1908 | if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) { |
1909 | if (isr & ISR_PAUSE_ST) { |
1910 | sc->sc_paused = 1; |
1911 | SIP_EVCNT_INCR(&sc->sc_ev_rxpause); |
1912 | ifp->if_flags |= IFF_OACTIVE; |
1913 | } |
1914 | if (isr & ISR_PAUSE_END) { |
1915 | sc->sc_paused = 0; |
1916 | ifp->if_flags &= ~IFF_OACTIVE; |
1917 | } |
1918 | } |
1919 | |
1920 | if (isr & ISR_HIBERR) { |
1921 | int want_init = 0; |
1922 | |
1923 | SIP_EVCNT_INCR(&sc->sc_ev_hiberr); |
1924 | |
1925 | #define PRINTERR(bit, str) \ |
1926 | do { \ |
1927 | if ((isr & (bit)) != 0) { \ |
1928 | if ((ifp->if_flags & IFF_DEBUG) != 0) \ |
1929 | printf("%s: %s\n", \ |
1930 | device_xname(sc->sc_dev), str); \ |
1931 | want_init = 1; \ |
1932 | } \ |
1933 | } while (/*CONSTCOND*/0) |
1934 | |
1935 | PRINTERR(sc->sc_bits.b_isr_dperr, "parity error" ); |
1936 | PRINTERR(sc->sc_bits.b_isr_sserr, "system error" ); |
1937 | PRINTERR(sc->sc_bits.b_isr_rmabt, "master abort" ); |
1938 | PRINTERR(sc->sc_bits.b_isr_rtabt, "target abort" ); |
1939 | PRINTERR(ISR_RXSOVR, "receive status FIFO overrun" ); |
1940 | /* |
1941 | * Ignore: |
1942 | * Tx reset complete |
1943 | * Rx reset complete |
1944 | */ |
1945 | if (want_init) |
1946 | (void) sipcom_init(ifp); |
1947 | #undef PRINTERR |
1948 | } |
1949 | } |
1950 | |
1951 | /* Re-enable interrupts. */ |
1952 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, IER_IE); |
1953 | |
1954 | /* Try to get more packets going. */ |
1955 | sipcom_start(ifp); |
1956 | |
1957 | return (handled); |
1958 | } |
1959 | |
1960 | /* |
1961 | * sip_txintr: |
1962 | * |
1963 | * Helper; handle transmit interrupts. |
1964 | */ |
1965 | static void |
1966 | sipcom_txintr(struct sip_softc *sc) |
1967 | { |
1968 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1969 | struct sip_txsoft *txs; |
1970 | u_int32_t cmdsts; |
1971 | |
1972 | if (sc->sc_paused == 0) |
1973 | ifp->if_flags &= ~IFF_OACTIVE; |
1974 | |
1975 | /* |
1976 | * Go through our Tx list and free mbufs for those |
1977 | * frames which have been transmitted. |
1978 | */ |
1979 | while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { |
1980 | sip_cdtxsync(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs, |
1981 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1982 | |
1983 | cmdsts = le32toh(*sipd_cmdsts(sc, |
1984 | &sc->sc_txdescs[txs->txs_lastdesc])); |
1985 | if (cmdsts & CMDSTS_OWN) |
1986 | break; |
1987 | |
1988 | SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); |
1989 | |
1990 | sc->sc_txfree += txs->txs_dmamap->dm_nsegs; |
1991 | |
1992 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, |
1993 | 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
1994 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
1995 | m_freem(txs->txs_mbuf); |
1996 | txs->txs_mbuf = NULL; |
1997 | |
1998 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); |
1999 | |
2000 | /* |
2001 | * Check for errors and collisions. |
2002 | */ |
2003 | if (cmdsts & |
2004 | (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) { |
2005 | ifp->if_oerrors++; |
2006 | if (cmdsts & CMDSTS_Tx_EC) |
2007 | ifp->if_collisions += 16; |
2008 | if (ifp->if_flags & IFF_DEBUG) { |
2009 | if (cmdsts & CMDSTS_Tx_ED) |
2010 | printf("%s: excessive deferral\n" , |
2011 | device_xname(sc->sc_dev)); |
2012 | if (cmdsts & CMDSTS_Tx_EC) |
2013 | printf("%s: excessive collisions\n" , |
2014 | device_xname(sc->sc_dev)); |
2015 | } |
2016 | } else { |
2017 | /* Packet was transmitted successfully. */ |
2018 | ifp->if_opackets++; |
2019 | ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts); |
2020 | } |
2021 | } |
2022 | |
2023 | /* |
2024 | * If there are no more pending transmissions, cancel the watchdog |
2025 | * timer. |
2026 | */ |
2027 | if (txs == NULL) { |
2028 | ifp->if_timer = 0; |
2029 | sc->sc_txwin = 0; |
2030 | } |
2031 | } |
2032 | |
2033 | /* |
2034 | * gsip_rxintr: |
2035 | * |
2036 | * Helper; handle receive interrupts on gigabit parts. |
2037 | */ |
2038 | static void |
2039 | gsip_rxintr(struct sip_softc *sc) |
2040 | { |
2041 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
2042 | struct sip_rxsoft *rxs; |
2043 | struct mbuf *m; |
2044 | u_int32_t cmdsts, extsts; |
2045 | int i, len; |
2046 | |
2047 | for (i = sc->sc_rxptr;; i = sip_nextrx(sc, i)) { |
2048 | rxs = &sc->sc_rxsoft[i]; |
2049 | |
2050 | sip_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
2051 | |
2052 | cmdsts = le32toh(*sipd_cmdsts(sc, &sc->sc_rxdescs[i])); |
2053 | extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts); |
2054 | len = CMDSTS_SIZE(sc, cmdsts); |
2055 | |
2056 | /* |
2057 | * NOTE: OWN is set if owned by _consumer_. We're the |
2058 | * consumer of the receive ring, so if the bit is clear, |
2059 | * we have processed all of the packets. |
2060 | */ |
2061 | if ((cmdsts & CMDSTS_OWN) == 0) { |
2062 | /* |
2063 | * We have processed all of the receive buffers. |
2064 | */ |
2065 | break; |
2066 | } |
2067 | |
2068 | if (__predict_false(sc->sc_rxdiscard)) { |
2069 | sip_init_rxdesc(sc, i); |
2070 | if ((cmdsts & CMDSTS_MORE) == 0) { |
2071 | /* Reset our state. */ |
2072 | sc->sc_rxdiscard = 0; |
2073 | } |
2074 | continue; |
2075 | } |
2076 | |
2077 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
2078 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2079 | |
2080 | m = rxs->rxs_mbuf; |
2081 | |
2082 | /* |
2083 | * Add a new receive buffer to the ring. |
2084 | */ |
2085 | if (sipcom_add_rxbuf(sc, i) != 0) { |
2086 | /* |
2087 | * Failed, throw away what we've done so |
2088 | * far, and discard the rest of the packet. |
2089 | */ |
2090 | ifp->if_ierrors++; |
2091 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
2092 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
2093 | sip_init_rxdesc(sc, i); |
2094 | if (cmdsts & CMDSTS_MORE) |
2095 | sc->sc_rxdiscard = 1; |
2096 | if (sc->sc_rxhead != NULL) |
2097 | m_freem(sc->sc_rxhead); |
2098 | sip_rxchain_reset(sc); |
2099 | continue; |
2100 | } |
2101 | |
2102 | sip_rxchain_link(sc, m); |
2103 | |
2104 | m->m_len = len; |
2105 | |
2106 | /* |
2107 | * If this is not the end of the packet, keep |
2108 | * looking. |
2109 | */ |
2110 | if (cmdsts & CMDSTS_MORE) { |
2111 | sc->sc_rxlen += len; |
2112 | continue; |
2113 | } |
2114 | |
2115 | /* |
2116 | * Okay, we have the entire packet now. The chip includes |
2117 | * the FCS, so we need to trim it. |
2118 | */ |
2119 | m->m_len -= ETHER_CRC_LEN; |
2120 | |
2121 | *sc->sc_rxtailp = NULL; |
2122 | len = m->m_len + sc->sc_rxlen; |
2123 | m = sc->sc_rxhead; |
2124 | |
2125 | sip_rxchain_reset(sc); |
2126 | |
2127 | /* |
2128 | * If an error occurred, update stats and drop the packet. |
2129 | */ |
2130 | if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT| |
2131 | CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) { |
2132 | ifp->if_ierrors++; |
2133 | if ((cmdsts & CMDSTS_Rx_RXA) != 0 && |
2134 | (cmdsts & CMDSTS_Rx_RXO) == 0) { |
2135 | /* Receive overrun handled elsewhere. */ |
2136 | printf("%s: receive descriptor error\n" , |
2137 | device_xname(sc->sc_dev)); |
2138 | } |
2139 | #define PRINTERR(bit, str) \ |
2140 | if ((ifp->if_flags & IFF_DEBUG) != 0 && \ |
2141 | (cmdsts & (bit)) != 0) \ |
2142 | printf("%s: %s\n", device_xname(sc->sc_dev), str) |
2143 | PRINTERR(CMDSTS_Rx_RUNT, "runt packet" ); |
2144 | PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error" ); |
2145 | PRINTERR(CMDSTS_Rx_CRCE, "CRC error" ); |
2146 | PRINTERR(CMDSTS_Rx_FAE, "frame alignment error" ); |
2147 | #undef PRINTERR |
2148 | m_freem(m); |
2149 | continue; |
2150 | } |
2151 | |
2152 | /* |
2153 | * If the packet is small enough to fit in a |
2154 | * single header mbuf, allocate one and copy |
2155 | * the data into it. This greatly reduces |
2156 | * memory consumption when we receive lots |
2157 | * of small packets. |
2158 | */ |
2159 | if (gsip_copy_small != 0 && len <= (MHLEN - 2)) { |
2160 | struct mbuf *nm; |
2161 | MGETHDR(nm, M_DONTWAIT, MT_DATA); |
2162 | if (nm == NULL) { |
2163 | ifp->if_ierrors++; |
2164 | m_freem(m); |
2165 | continue; |
2166 | } |
2167 | MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); |
2168 | nm->m_data += 2; |
2169 | nm->m_pkthdr.len = nm->m_len = len; |
2170 | m_copydata(m, 0, len, mtod(nm, void *)); |
2171 | m_freem(m); |
2172 | m = nm; |
2173 | } |
2174 | #ifndef __NO_STRICT_ALIGNMENT |
2175 | else { |
2176 | /* |
2177 | * The DP83820's receive buffers must be 4-byte |
2178 | * aligned. But this means that the data after |
2179 | * the Ethernet header is misaligned. To compensate, |
2180 | * we have artificially shortened the buffer size |
2181 | * in the descriptor, and we do an overlapping copy |
2182 | * of the data two bytes further in (in the first |
2183 | * buffer of the chain only). |
2184 | */ |
2185 | memmove(mtod(m, char *) + 2, mtod(m, void *), |
2186 | m->m_len); |
2187 | m->m_data += 2; |
2188 | } |
2189 | #endif /* ! __NO_STRICT_ALIGNMENT */ |
2190 | |
2191 | /* |
2192 | * If VLANs are enabled, VLAN packets have been unwrapped |
2193 | * for us. Associate the tag with the packet. |
2194 | */ |
2195 | |
2196 | /* |
2197 | * Again, byte swapping is tricky. Hardware provided |
2198 | * the tag in the network byte order, but extsts was |
2199 | * passed through le32toh() in the meantime. On a |
2200 | * big-endian machine, we need to swap it again. On a |
2201 | * little-endian machine, we need to convert from the |
2202 | * network to host byte order. This means that we must |
2203 | * swap it in any case, so unconditional swap instead |
2204 | * of htons() is used. |
2205 | */ |
2206 | if ((extsts & EXTSTS_VPKT) != 0) { |
2207 | VLAN_INPUT_TAG(ifp, m, bswap16(extsts & EXTSTS_VTCI), |
2208 | continue); |
2209 | } |
2210 | |
2211 | /* |
2212 | * Set the incoming checksum information for the |
2213 | * packet. |
2214 | */ |
2215 | if ((extsts & EXTSTS_IPPKT) != 0) { |
2216 | SIP_EVCNT_INCR(&sc->sc_ev_rxipsum); |
2217 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; |
2218 | if (extsts & EXTSTS_Rx_IPERR) |
2219 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; |
2220 | if (extsts & EXTSTS_TCPPKT) { |
2221 | SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum); |
2222 | m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; |
2223 | if (extsts & EXTSTS_Rx_TCPERR) |
2224 | m->m_pkthdr.csum_flags |= |
2225 | M_CSUM_TCP_UDP_BAD; |
2226 | } else if (extsts & EXTSTS_UDPPKT) { |
2227 | SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum); |
2228 | m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; |
2229 | if (extsts & EXTSTS_Rx_UDPERR) |
2230 | m->m_pkthdr.csum_flags |= |
2231 | M_CSUM_TCP_UDP_BAD; |
2232 | } |
2233 | } |
2234 | |
2235 | ifp->if_ipackets++; |
2236 | m_set_rcvif(m, ifp); |
2237 | m->m_pkthdr.len = len; |
2238 | |
2239 | /* |
2240 | * Pass this up to any BPF listeners, but only |
2241 | * pass if up the stack if it's for us. |
2242 | */ |
2243 | bpf_mtap(ifp, m); |
2244 | |
2245 | /* Pass it on. */ |
2246 | if_percpuq_enqueue(ifp->if_percpuq, m); |
2247 | } |
2248 | |
2249 | /* Update the receive pointer. */ |
2250 | sc->sc_rxptr = i; |
2251 | } |
2252 | |
2253 | /* |
2254 | * sip_rxintr: |
2255 | * |
2256 | * Helper; handle receive interrupts on 10/100 parts. |
2257 | */ |
2258 | static void |
2259 | sip_rxintr(struct sip_softc *sc) |
2260 | { |
2261 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
2262 | struct sip_rxsoft *rxs; |
2263 | struct mbuf *m; |
2264 | u_int32_t cmdsts; |
2265 | int i, len; |
2266 | |
2267 | for (i = sc->sc_rxptr;; i = sip_nextrx(sc, i)) { |
2268 | rxs = &sc->sc_rxsoft[i]; |
2269 | |
2270 | sip_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
2271 | |
2272 | cmdsts = le32toh(*sipd_cmdsts(sc, &sc->sc_rxdescs[i])); |
2273 | |
2274 | /* |
2275 | * NOTE: OWN is set if owned by _consumer_. We're the |
2276 | * consumer of the receive ring, so if the bit is clear, |
2277 | * we have processed all of the packets. |
2278 | */ |
2279 | if ((cmdsts & CMDSTS_OWN) == 0) { |
2280 | /* |
2281 | * We have processed all of the receive buffers. |
2282 | */ |
2283 | break; |
2284 | } |
2285 | |
2286 | /* |
2287 | * If any collisions were seen on the wire, count one. |
2288 | */ |
2289 | if (cmdsts & CMDSTS_Rx_COL) |
2290 | ifp->if_collisions++; |
2291 | |
2292 | /* |
2293 | * If an error occurred, update stats, clear the status |
2294 | * word, and leave the packet buffer in place. It will |
2295 | * simply be reused the next time the ring comes around. |
2296 | */ |
2297 | if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT| |
2298 | CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) { |
2299 | ifp->if_ierrors++; |
2300 | if ((cmdsts & CMDSTS_Rx_RXA) != 0 && |
2301 | (cmdsts & CMDSTS_Rx_RXO) == 0) { |
2302 | /* Receive overrun handled elsewhere. */ |
2303 | printf("%s: receive descriptor error\n" , |
2304 | device_xname(sc->sc_dev)); |
2305 | } |
2306 | #define PRINTERR(bit, str) \ |
2307 | if ((ifp->if_flags & IFF_DEBUG) != 0 && \ |
2308 | (cmdsts & (bit)) != 0) \ |
2309 | printf("%s: %s\n", device_xname(sc->sc_dev), str) |
2310 | PRINTERR(CMDSTS_Rx_RUNT, "runt packet" ); |
2311 | PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error" ); |
2312 | PRINTERR(CMDSTS_Rx_CRCE, "CRC error" ); |
2313 | PRINTERR(CMDSTS_Rx_FAE, "frame alignment error" ); |
2314 | #undef PRINTERR |
2315 | sip_init_rxdesc(sc, i); |
2316 | continue; |
2317 | } |
2318 | |
2319 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
2320 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2321 | |
2322 | /* |
2323 | * No errors; receive the packet. Note, the SiS 900 |
2324 | * includes the CRC with every packet. |
2325 | */ |
2326 | len = CMDSTS_SIZE(sc, cmdsts) - ETHER_CRC_LEN; |
2327 | |
2328 | #ifdef __NO_STRICT_ALIGNMENT |
2329 | /* |
2330 | * If the packet is small enough to fit in a |
2331 | * single header mbuf, allocate one and copy |
2332 | * the data into it. This greatly reduces |
2333 | * memory consumption when we receive lots |
2334 | * of small packets. |
2335 | * |
2336 | * Otherwise, we add a new buffer to the receive |
2337 | * chain. If this fails, we drop the packet and |
2338 | * recycle the old buffer. |
2339 | */ |
2340 | if (sip_copy_small != 0 && len <= MHLEN) { |
2341 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
2342 | if (m == NULL) |
2343 | goto dropit; |
2344 | MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); |
2345 | memcpy(mtod(m, void *), |
2346 | mtod(rxs->rxs_mbuf, void *), len); |
2347 | sip_init_rxdesc(sc, i); |
2348 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
2349 | rxs->rxs_dmamap->dm_mapsize, |
2350 | BUS_DMASYNC_PREREAD); |
2351 | } else { |
2352 | m = rxs->rxs_mbuf; |
2353 | if (sipcom_add_rxbuf(sc, i) != 0) { |
2354 | dropit: |
2355 | ifp->if_ierrors++; |
2356 | sip_init_rxdesc(sc, i); |
2357 | bus_dmamap_sync(sc->sc_dmat, |
2358 | rxs->rxs_dmamap, 0, |
2359 | rxs->rxs_dmamap->dm_mapsize, |
2360 | BUS_DMASYNC_PREREAD); |
2361 | continue; |
2362 | } |
2363 | } |
2364 | #else |
2365 | /* |
2366 | * The SiS 900's receive buffers must be 4-byte aligned. |
2367 | * But this means that the data after the Ethernet header |
2368 | * is misaligned. We must allocate a new buffer and |
2369 | * copy the data, shifted forward 2 bytes. |
2370 | */ |
2371 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
2372 | if (m == NULL) { |
2373 | dropit: |
2374 | ifp->if_ierrors++; |
2375 | sip_init_rxdesc(sc, i); |
2376 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
2377 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
2378 | continue; |
2379 | } |
2380 | MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); |
2381 | if (len > (MHLEN - 2)) { |
2382 | MCLGET(m, M_DONTWAIT); |
2383 | if ((m->m_flags & M_EXT) == 0) { |
2384 | m_freem(m); |
2385 | goto dropit; |
2386 | } |
2387 | } |
2388 | m->m_data += 2; |
2389 | |
2390 | /* |
2391 | * Note that we use clusters for incoming frames, so the |
2392 | * buffer is virtually contiguous. |
2393 | */ |
2394 | memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len); |
2395 | |
2396 | /* Allow the receive descriptor to continue using its mbuf. */ |
2397 | sip_init_rxdesc(sc, i); |
2398 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
2399 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
2400 | #endif /* __NO_STRICT_ALIGNMENT */ |
2401 | |
2402 | ifp->if_ipackets++; |
2403 | m_set_rcvif(m, ifp); |
2404 | m->m_pkthdr.len = m->m_len = len; |
2405 | |
2406 | /* |
2407 | * Pass this up to any BPF listeners, but only |
2408 | * pass if up the stack if it's for us. |
2409 | */ |
2410 | bpf_mtap(ifp, m); |
2411 | |
2412 | /* Pass it on. */ |
2413 | if_percpuq_enqueue(ifp->if_percpuq, m); |
2414 | } |
2415 | |
2416 | /* Update the receive pointer. */ |
2417 | sc->sc_rxptr = i; |
2418 | } |
2419 | |
2420 | /* |
2421 | * sip_tick: |
2422 | * |
2423 | * One second timer, used to tick the MII. |
2424 | */ |
2425 | static void |
2426 | sipcom_tick(void *arg) |
2427 | { |
2428 | struct sip_softc *sc = arg; |
2429 | int s; |
2430 | |
2431 | s = splnet(); |
2432 | #ifdef SIP_EVENT_COUNTERS |
2433 | if (sc->sc_gigabit) { |
2434 | /* Read PAUSE related counts from MIB registers. */ |
2435 | sc->sc_ev_rxpause.ev_count += |
2436 | bus_space_read_4(sc->sc_st, sc->sc_sh, |
2437 | SIP_NS_MIB(MIB_RXPauseFrames)) & 0xffff; |
2438 | sc->sc_ev_txpause.ev_count += |
2439 | bus_space_read_4(sc->sc_st, sc->sc_sh, |
2440 | SIP_NS_MIB(MIB_TXPauseFrames)) & 0xffff; |
2441 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_MIBC, MIBC_ACLR); |
2442 | } |
2443 | #endif /* SIP_EVENT_COUNTERS */ |
2444 | mii_tick(&sc->sc_mii); |
2445 | splx(s); |
2446 | |
2447 | callout_reset(&sc->sc_tick_ch, hz, sipcom_tick, sc); |
2448 | } |
2449 | |
2450 | /* |
2451 | * sip_reset: |
2452 | * |
2453 | * Perform a soft reset on the SiS 900. |
2454 | */ |
2455 | static bool |
2456 | sipcom_reset(struct sip_softc *sc) |
2457 | { |
2458 | bus_space_tag_t st = sc->sc_st; |
2459 | bus_space_handle_t sh = sc->sc_sh; |
2460 | int i; |
2461 | |
2462 | bus_space_write_4(st, sh, SIP_IER, 0); |
2463 | bus_space_write_4(st, sh, SIP_IMR, 0); |
2464 | bus_space_write_4(st, sh, SIP_RFCR, 0); |
2465 | bus_space_write_4(st, sh, SIP_CR, CR_RST); |
2466 | |
2467 | for (i = 0; i < SIP_TIMEOUT; i++) { |
2468 | if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0) |
2469 | break; |
2470 | delay(2); |
2471 | } |
2472 | |
2473 | if (i == SIP_TIMEOUT) { |
2474 | printf("%s: reset failed to complete\n" , |
2475 | device_xname(sc->sc_dev)); |
2476 | return false; |
2477 | } |
2478 | |
2479 | delay(1000); |
2480 | |
2481 | if (sc->sc_gigabit) { |
2482 | /* |
2483 | * Set the general purpose I/O bits. Do it here in case we |
2484 | * need to have GPIO set up to talk to the media interface. |
2485 | */ |
2486 | bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior); |
2487 | delay(1000); |
2488 | } |
2489 | return true; |
2490 | } |
2491 | |
2492 | static void |
2493 | sipcom_dp83820_init(struct sip_softc *sc, uint64_t capenable) |
2494 | { |
2495 | u_int32_t reg; |
2496 | bus_space_tag_t st = sc->sc_st; |
2497 | bus_space_handle_t sh = sc->sc_sh; |
2498 | /* |
2499 | * Initialize the VLAN/IP receive control register. |
2500 | * We enable checksum computation on all incoming |
2501 | * packets, and do not reject packets w/ bad checksums. |
2502 | */ |
2503 | reg = 0; |
2504 | if (capenable & |
2505 | (IFCAP_CSUM_IPv4_Rx|IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx)) |
2506 | reg |= VRCR_IPEN; |
2507 | if (VLAN_ATTACHED(&sc->sc_ethercom)) |
2508 | reg |= VRCR_VTDEN|VRCR_VTREN; |
2509 | bus_space_write_4(st, sh, SIP_VRCR, reg); |
2510 | |
2511 | /* |
2512 | * Initialize the VLAN/IP transmit control register. |
2513 | * We enable outgoing checksum computation on a |
2514 | * per-packet basis. |
2515 | */ |
2516 | reg = 0; |
2517 | if (capenable & |
2518 | (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx)) |
2519 | reg |= VTCR_PPCHK; |
2520 | if (VLAN_ATTACHED(&sc->sc_ethercom)) |
2521 | reg |= VTCR_VPPTI; |
2522 | bus_space_write_4(st, sh, SIP_VTCR, reg); |
2523 | |
2524 | /* |
2525 | * If we're using VLANs, initialize the VLAN data register. |
2526 | * To understand why we bswap the VLAN Ethertype, see section |
2527 | * 4.2.36 of the DP83820 manual. |
2528 | */ |
2529 | if (VLAN_ATTACHED(&sc->sc_ethercom)) |
2530 | bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN)); |
2531 | } |
2532 | |
2533 | /* |
2534 | * sip_init: [ ifnet interface function ] |
2535 | * |
2536 | * Initialize the interface. Must be called at splnet(). |
2537 | */ |
2538 | static int |
2539 | sipcom_init(struct ifnet *ifp) |
2540 | { |
2541 | struct sip_softc *sc = ifp->if_softc; |
2542 | bus_space_tag_t st = sc->sc_st; |
2543 | bus_space_handle_t sh = sc->sc_sh; |
2544 | struct sip_txsoft *txs; |
2545 | struct sip_rxsoft *rxs; |
2546 | struct sip_desc *sipd; |
2547 | int i, error = 0; |
2548 | |
2549 | if (device_is_active(sc->sc_dev)) { |
2550 | /* |
2551 | * Cancel any pending I/O. |
2552 | */ |
2553 | sipcom_stop(ifp, 0); |
2554 | } else if (!pmf_device_subtree_resume(sc->sc_dev, &sc->sc_qual) || |
2555 | !device_is_active(sc->sc_dev)) |
2556 | return 0; |
2557 | |
2558 | /* |
2559 | * Reset the chip to a known state. |
2560 | */ |
2561 | if (!sipcom_reset(sc)) |
2562 | return EBUSY; |
2563 | |
2564 | if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) { |
2565 | /* |
2566 | * DP83815 manual, page 78: |
2567 | * 4.4 Recommended Registers Configuration |
2568 | * For optimum performance of the DP83815, version noted |
2569 | * as DP83815CVNG (SRR = 203h), the listed register |
2570 | * modifications must be followed in sequence... |
2571 | * |
2572 | * It's not clear if this should be 302h or 203h because that |
2573 | * chip name is listed as SRR 302h in the description of the |
2574 | * SRR register. However, my revision 302h DP83815 on the |
2575 | * Netgear FA311 purchased in 02/2001 needs these settings |
2576 | * to avoid tons of errors in AcceptPerfectMatch (non- |
2577 | * IFF_PROMISC) mode. I do not know if other revisions need |
2578 | * this set or not. [briggs -- 09 March 2001] |
2579 | * |
2580 | * Note that only the low-order 12 bits of 0xe4 are documented |
2581 | * and that this sets reserved bits in that register. |
2582 | */ |
2583 | bus_space_write_4(st, sh, 0x00cc, 0x0001); |
2584 | |
2585 | bus_space_write_4(st, sh, 0x00e4, 0x189C); |
2586 | bus_space_write_4(st, sh, 0x00fc, 0x0000); |
2587 | bus_space_write_4(st, sh, 0x00f4, 0x5040); |
2588 | bus_space_write_4(st, sh, 0x00f8, 0x008c); |
2589 | |
2590 | bus_space_write_4(st, sh, 0x00cc, 0x0000); |
2591 | } |
2592 | |
2593 | /* |
2594 | * Initialize the transmit descriptor ring. |
2595 | */ |
2596 | for (i = 0; i < sc->sc_ntxdesc; i++) { |
2597 | sipd = &sc->sc_txdescs[i]; |
2598 | memset(sipd, 0, sizeof(struct sip_desc)); |
2599 | sipd->sipd_link = htole32(SIP_CDTXADDR(sc, sip_nexttx(sc, i))); |
2600 | } |
2601 | sip_cdtxsync(sc, 0, sc->sc_ntxdesc, |
2602 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
2603 | sc->sc_txfree = sc->sc_ntxdesc; |
2604 | sc->sc_txnext = 0; |
2605 | sc->sc_txwin = 0; |
2606 | |
2607 | /* |
2608 | * Initialize the transmit job descriptors. |
2609 | */ |
2610 | SIMPLEQ_INIT(&sc->sc_txfreeq); |
2611 | SIMPLEQ_INIT(&sc->sc_txdirtyq); |
2612 | for (i = 0; i < SIP_TXQUEUELEN; i++) { |
2613 | txs = &sc->sc_txsoft[i]; |
2614 | txs->txs_mbuf = NULL; |
2615 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); |
2616 | } |
2617 | |
2618 | /* |
2619 | * Initialize the receive descriptor and receive job |
2620 | * descriptor rings. |
2621 | */ |
2622 | for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) { |
2623 | rxs = &sc->sc_rxsoft[i]; |
2624 | if (rxs->rxs_mbuf == NULL) { |
2625 | if ((error = sipcom_add_rxbuf(sc, i)) != 0) { |
2626 | printf("%s: unable to allocate or map rx " |
2627 | "buffer %d, error = %d\n" , |
2628 | device_xname(sc->sc_dev), i, error); |
2629 | /* |
2630 | * XXX Should attempt to run with fewer receive |
2631 | * XXX buffers instead of just failing. |
2632 | */ |
2633 | sipcom_rxdrain(sc); |
2634 | goto out; |
2635 | } |
2636 | } else |
2637 | sip_init_rxdesc(sc, i); |
2638 | } |
2639 | sc->sc_rxptr = 0; |
2640 | sc->sc_rxdiscard = 0; |
2641 | sip_rxchain_reset(sc); |
2642 | |
2643 | /* |
2644 | * Set the configuration register; it's already initialized |
2645 | * in sip_attach(). |
2646 | */ |
2647 | bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg); |
2648 | |
2649 | /* |
2650 | * Initialize the prototype TXCFG register. |
2651 | */ |
2652 | if (sc->sc_gigabit) { |
2653 | sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_512; |
2654 | sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_512; |
2655 | } else if ((SIP_SIS900_REV(sc, SIS_REV_635) || |
2656 | SIP_SIS900_REV(sc, SIS_REV_960) || |
2657 | SIP_SIS900_REV(sc, SIS_REV_900B)) && |
2658 | (sc->sc_cfg & CFG_EDBMASTEN)) { |
2659 | sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_64; |
2660 | sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_64; |
2661 | } else { |
2662 | sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_512; |
2663 | sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_512; |
2664 | } |
2665 | |
2666 | sc->sc_txcfg |= TXCFG_ATP | |
2667 | __SHIFTIN(sc->sc_tx_fill_thresh, sc->sc_bits.b_txcfg_flth_mask) | |
2668 | sc->sc_tx_drain_thresh; |
2669 | bus_space_write_4(st, sh, sc->sc_regs.r_txcfg, sc->sc_txcfg); |
2670 | |
2671 | /* |
2672 | * Initialize the receive drain threshold if we have never |
2673 | * done so. |
2674 | */ |
2675 | if (sc->sc_rx_drain_thresh == 0) { |
2676 | /* |
2677 | * XXX This value should be tuned. This is set to the |
2678 | * maximum of 248 bytes, and we may be able to improve |
2679 | * performance by decreasing it (although we should never |
2680 | * set this value lower than 2; 14 bytes are required to |
2681 | * filter the packet). |
2682 | */ |
2683 | sc->sc_rx_drain_thresh = __SHIFTOUT_MASK(RXCFG_DRTH_MASK); |
2684 | } |
2685 | |
2686 | /* |
2687 | * Initialize the prototype RXCFG register. |
2688 | */ |
2689 | sc->sc_rxcfg |= __SHIFTIN(sc->sc_rx_drain_thresh, RXCFG_DRTH_MASK); |
2690 | /* |
2691 | * Accept long packets (including FCS) so we can handle |
2692 | * 802.1q-tagged frames and jumbo frames properly. |
2693 | */ |
2694 | if ((sc->sc_gigabit && ifp->if_mtu > ETHERMTU) || |
2695 | (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)) |
2696 | sc->sc_rxcfg |= RXCFG_ALP; |
2697 | |
2698 | /* |
2699 | * Checksum offloading is disabled if the user selects an MTU |
2700 | * larger than 8109. (FreeBSD says 8152, but there is emperical |
2701 | * evidence that >8109 does not work on some boards, such as the |
2702 | * Planex GN-1000TE). |
2703 | */ |
2704 | if (sc->sc_gigabit && ifp->if_mtu > 8109 && |
2705 | (ifp->if_capenable & |
2706 | (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx| |
2707 | IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx| |
2708 | IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx))) { |
2709 | printf("%s: Checksum offloading does not work if MTU > 8109 - " |
2710 | "disabled.\n" , device_xname(sc->sc_dev)); |
2711 | ifp->if_capenable &= |
2712 | ~(IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx| |
2713 | IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx| |
2714 | IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx); |
2715 | ifp->if_csum_flags_tx = 0; |
2716 | ifp->if_csum_flags_rx = 0; |
2717 | } |
2718 | |
2719 | bus_space_write_4(st, sh, sc->sc_regs.r_rxcfg, sc->sc_rxcfg); |
2720 | |
2721 | if (sc->sc_gigabit) |
2722 | sipcom_dp83820_init(sc, ifp->if_capenable); |
2723 | |
2724 | /* |
2725 | * Give the transmit and receive rings to the chip. |
2726 | */ |
2727 | bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext)); |
2728 | bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr)); |
2729 | |
2730 | /* |
2731 | * Initialize the interrupt mask. |
2732 | */ |
2733 | sc->sc_imr = sc->sc_bits.b_isr_dperr | |
2734 | sc->sc_bits.b_isr_sserr | |
2735 | sc->sc_bits.b_isr_rmabt | |
2736 | sc->sc_bits.b_isr_rtabt | ISR_RXSOVR | |
2737 | ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC; |
2738 | bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr); |
2739 | |
2740 | /* Set up the receive filter. */ |
2741 | (*sc->sc_model->sip_variant->sipv_set_filter)(sc); |
2742 | |
2743 | /* |
2744 | * Tune sc_rx_flow_thresh. |
2745 | * XXX "More than 8KB" is too short for jumbo frames. |
2746 | * XXX TODO: Threshold value should be user-settable. |
2747 | */ |
2748 | sc->sc_rx_flow_thresh = (PCR_PS_STHI_8 | PCR_PS_STLO_4 | |
2749 | PCR_PS_FFHI_8 | PCR_PS_FFLO_4 | |
2750 | (PCR_PAUSE_CNT & PCR_PAUSE_CNT_MASK)); |
2751 | |
2752 | /* |
2753 | * Set the current media. Do this after initializing the prototype |
2754 | * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow |
2755 | * control. |
2756 | */ |
2757 | if ((error = ether_mediachange(ifp)) != 0) |
2758 | goto out; |
2759 | |
2760 | /* |
2761 | * Set the interrupt hold-off timer to 100us. |
2762 | */ |
2763 | if (sc->sc_gigabit) |
2764 | bus_space_write_4(st, sh, SIP_IHR, 0x01); |
2765 | |
2766 | /* |
2767 | * Enable interrupts. |
2768 | */ |
2769 | bus_space_write_4(st, sh, SIP_IER, IER_IE); |
2770 | |
2771 | /* |
2772 | * Start the transmit and receive processes. |
2773 | */ |
2774 | bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE); |
2775 | |
2776 | /* |
2777 | * Start the one second MII clock. |
2778 | */ |
2779 | callout_reset(&sc->sc_tick_ch, hz, sipcom_tick, sc); |
2780 | |
2781 | /* |
2782 | * ...all done! |
2783 | */ |
2784 | ifp->if_flags |= IFF_RUNNING; |
2785 | ifp->if_flags &= ~IFF_OACTIVE; |
2786 | sc->sc_if_flags = ifp->if_flags; |
2787 | sc->sc_prev.ec_capenable = sc->sc_ethercom.ec_capenable; |
2788 | sc->sc_prev.is_vlan = VLAN_ATTACHED(&(sc)->sc_ethercom); |
2789 | sc->sc_prev.if_capenable = ifp->if_capenable; |
2790 | |
2791 | out: |
2792 | if (error) |
2793 | printf("%s: interface not running\n" , device_xname(sc->sc_dev)); |
2794 | return (error); |
2795 | } |
2796 | |
2797 | /* |
2798 | * sip_drain: |
2799 | * |
2800 | * Drain the receive queue. |
2801 | */ |
2802 | static void |
2803 | sipcom_rxdrain(struct sip_softc *sc) |
2804 | { |
2805 | struct sip_rxsoft *rxs; |
2806 | int i; |
2807 | |
2808 | for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) { |
2809 | rxs = &sc->sc_rxsoft[i]; |
2810 | if (rxs->rxs_mbuf != NULL) { |
2811 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
2812 | m_freem(rxs->rxs_mbuf); |
2813 | rxs->rxs_mbuf = NULL; |
2814 | } |
2815 | } |
2816 | } |
2817 | |
2818 | /* |
2819 | * sip_stop: [ ifnet interface function ] |
2820 | * |
2821 | * Stop transmission on the interface. |
2822 | */ |
static void
sipcom_stop(struct ifnet *ifp, int disable)
{
	struct sip_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct sip_txsoft *txs;
	u_int32_t cmdsts = 0; /* DEBUG */

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Only touch the hardware registers if the device has not been
	 * detached or powered away.
	 */
	if (device_is_active(sc->sc_dev)) {
		/*
		 * Disable interrupts.
		 */
		bus_space_write_4(st, sh, SIP_IER, 0);

		/*
		 * Stop receiver and transmitter.
		 */
		bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
	}

	/*
	 * Release any queued transmit buffers.  While doing so, accumulate
	 * the CMDSTS words of the last descriptor of each job (DEBUG) so we
	 * can verify below that at least one dirty descriptor had its
	 * interrupt bit set.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * DEBUG: the final job on the dirty queue is expected to
		 * have requested a TX interrupt on its last descriptor.
		 */
		if ((ifp->if_flags & IFF_DEBUG) != 0 &&
		    SIMPLEQ_NEXT(txs, txs_q) == NULL &&
		    (le32toh(*sipd_cmdsts(sc, &sc->sc_txdescs[txs->txs_lastdesc])) &
		     CMDSTS_INTR) == 0)
			printf("%s: sip_stop: last descriptor does not "
			    "have INTR bit set\n" , device_xname(sc->sc_dev));
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
#ifdef DIAGNOSTIC
		/* A dirty txsoft must always carry the mbuf chain it mapped. */
		if (txs->txs_mbuf == NULL) {
			printf("%s: dirty txsoft with no mbuf chain\n" ,
			    device_xname(sc->sc_dev));
			panic("sip_stop" );
		}
#endif
		cmdsts |= /* DEBUG */
		    le32toh(*sipd_cmdsts(sc, &sc->sc_txdescs[txs->txs_lastdesc]));
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		/* Return the software descriptor to the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/*
	 * If requested, power the device down (recursively suspends the
	 * device and, as needed, its ancestors via pmf).
	 */
	if (disable)
		pmf_device_recursive_suspend(sc->sc_dev, &sc->sc_qual);

	/*
	 * DEBUG: if any descriptors were still dirty (ring not fully free),
	 * at least one of them should have had CMDSTS_INTR set.
	 */
	if ((ifp->if_flags & IFF_DEBUG) != 0 &&
	    (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != sc->sc_ntxdesc)
		printf("%s: sip_stop: no INTR bits set in dirty tx "
		    "descriptors\n" , device_xname(sc->sc_dev));
}
2892 | |
2893 | /* |
2894 | * sip_read_eeprom: |
2895 | * |
2896 | * Read data from the serial EEPROM. |
2897 | */ |
static void
sipcom_read_eeprom(struct sip_softc *sc, int word, int wordcnt,
    u_int16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	/*
	 * Bit-bang the Microwire-style serial EEPROM through the EROMAR
	 * register: for each word, assert chip select, clock out the
	 * 3-bit READ opcode and 6-bit address (MSB first), then clock
	 * in 16 data bits.  Each bit is presented on EEDI and latched
	 * by pulsing EESK high then low, with short settling delays.
	 */
	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT. */
		reg = EROMAR_EECS;
		bus_space_write_4(st, sh, SIP_EROMAR, reg);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/*
		 * Shift out data.  The EEPROM presents each bit on EEDO
		 * after a rising edge on EESK; bits arrive MSB first.
		 */
		reg = EROMAR_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Clear CHIP SELECT (required between consecutive reads). */
		bus_space_write_4(st, sh, SIP_EROMAR, 0);
		delay(4);
	}
}
2958 | |
2959 | /* |
2960 | * sipcom_add_rxbuf: |
2961 | * |
2962 | * Add a receive buffer to the indicated descriptor. |
2963 | */ |
static int
sipcom_add_rxbuf(struct sip_softc *sc, int idx)
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header; fail softly so the caller can retry. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

	/* Attach a cluster; without one the buffer is useless for RX. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* XXX I don't believe this is necessary. --dyoung */
	if (sc->sc_gigabit)
		m->m_len = sc->sc_parm->p_rxbuf_len;

	/* If the slot already had a buffer mapped, unmap it first. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster for device reads (incoming packets). */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * Load failure here leaves the ring slot unusable, so
		 * treat it as fatal rather than trying to recover.
		 */
		printf("%s: can't load rx DMA map %d, error = %d\n" ,
		    device_xname(sc->sc_dev), idx, error);
		panic("%s" , __func__);	/* XXX */
	}

	/* Make the buffer visible to the device before handing it over. */
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Re-initialize the hardware descriptor for this slot. */
	sip_init_rxdesc(sc, idx);

	return (0);
}
3007 | |
3008 | /* |
3009 | * sip_sis900_set_filter: |
3010 | * |
3011 | * Set up the receive filter. |
3012 | */ |
3013 | static void |
3014 | sipcom_sis900_set_filter(struct sip_softc *sc) |
3015 | { |
3016 | bus_space_tag_t st = sc->sc_st; |
3017 | bus_space_handle_t sh = sc->sc_sh; |
3018 | struct ethercom *ec = &sc->sc_ethercom; |
3019 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
3020 | struct ether_multi *enm; |
3021 | const u_int8_t *cp; |
3022 | struct ether_multistep step; |
3023 | u_int32_t crc, mchash[16]; |
3024 | |
3025 | /* |
3026 | * Initialize the prototype RFCR. |
3027 | */ |
3028 | sc->sc_rfcr = RFCR_RFEN; |
3029 | if (ifp->if_flags & IFF_BROADCAST) |
3030 | sc->sc_rfcr |= RFCR_AAB; |
3031 | if (ifp->if_flags & IFF_PROMISC) { |
3032 | sc->sc_rfcr |= RFCR_AAP; |
3033 | goto allmulti; |
3034 | } |
3035 | |
3036 | /* |
3037 | * Set up the multicast address filter by passing all multicast |
3038 | * addresses through a CRC generator, and then using the high-order |
3039 | * 6 bits as an index into the 128 bit multicast hash table (only |
3040 | * the lower 16 bits of each 32 bit multicast hash register are |
3041 | * valid). The high order bits select the register, while the |
3042 | * rest of the bits select the bit within the register. |
3043 | */ |
3044 | |
3045 | memset(mchash, 0, sizeof(mchash)); |
3046 | |
3047 | /* |
3048 | * SiS900 (at least SiS963) requires us to register the address of |
3049 | * the PAUSE packet (01:80:c2:00:00:01) into the address filter. |
3050 | */ |
3051 | crc = 0x0ed423f9; |
3052 | |
3053 | if (SIP_SIS900_REV(sc, SIS_REV_635) || |
3054 | SIP_SIS900_REV(sc, SIS_REV_960) || |
3055 | SIP_SIS900_REV(sc, SIS_REV_900B)) { |
3056 | /* Just want the 8 most significant bits. */ |
3057 | crc >>= 24; |
3058 | } else { |
3059 | /* Just want the 7 most significant bits. */ |
3060 | crc >>= 25; |
3061 | } |
3062 | |
3063 | /* Set the corresponding bit in the hash table. */ |
3064 | mchash[crc >> 4] |= 1 << (crc & 0xf); |
3065 | |
3066 | ETHER_FIRST_MULTI(step, ec, enm); |
3067 | while (enm != NULL) { |
3068 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
3069 | /* |
3070 | * We must listen to a range of multicast addresses. |
3071 | * For now, just accept all multicasts, rather than |
3072 | * trying to set only those filter bits needed to match |
3073 | * the range. (At this time, the only use of address |
3074 | * ranges is for IP multicast routing, for which the |
3075 | * range is big enough to require all bits set.) |
3076 | */ |
3077 | goto allmulti; |
3078 | } |
3079 | |
3080 | crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); |
3081 | |
3082 | if (SIP_SIS900_REV(sc, SIS_REV_635) || |
3083 | SIP_SIS900_REV(sc, SIS_REV_960) || |
3084 | SIP_SIS900_REV(sc, SIS_REV_900B)) { |
3085 | /* Just want the 8 most significant bits. */ |
3086 | crc >>= 24; |
3087 | } else { |
3088 | /* Just want the 7 most significant bits. */ |
3089 | crc >>= 25; |
3090 | } |
3091 | |
3092 | /* Set the corresponding bit in the hash table. */ |
3093 | mchash[crc >> 4] |= 1 << (crc & 0xf); |
3094 | |
3095 | ETHER_NEXT_MULTI(step, enm); |
3096 | } |
3097 | |
3098 | ifp->if_flags &= ~IFF_ALLMULTI; |
3099 | goto setit; |
3100 | |
3101 | allmulti: |
3102 | ifp->if_flags |= IFF_ALLMULTI; |
3103 | sc->sc_rfcr |= RFCR_AAM; |
3104 | |
3105 | setit: |
3106 | #define FILTER_EMIT(addr, data) \ |
3107 | bus_space_write_4(st, sh, SIP_RFCR, (addr)); \ |
3108 | delay(1); \ |
3109 | bus_space_write_4(st, sh, SIP_RFDR, (data)); \ |
3110 | delay(1) |
3111 | |
3112 | /* |
3113 | * Disable receive filter, and program the node address. |
3114 | */ |
3115 | cp = CLLADDR(ifp->if_sadl); |
3116 | FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]); |
3117 | FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]); |
3118 | FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]); |
3119 | |
3120 | if ((ifp->if_flags & IFF_ALLMULTI) == 0) { |
3121 | /* |
3122 | * Program the multicast hash table. |
3123 | */ |
3124 | FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]); |
3125 | FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]); |
3126 | FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]); |
3127 | FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]); |
3128 | FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]); |
3129 | FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]); |
3130 | FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]); |
3131 | FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]); |
3132 | if (SIP_SIS900_REV(sc, SIS_REV_635) || |
3133 | SIP_SIS900_REV(sc, SIS_REV_960) || |
3134 | SIP_SIS900_REV(sc, SIS_REV_900B)) { |
3135 | FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]); |
3136 | FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]); |
3137 | FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]); |
3138 | FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]); |
3139 | FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]); |
3140 | FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]); |
3141 | FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]); |
3142 | FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]); |
3143 | } |
3144 | } |
3145 | #undef FILTER_EMIT |
3146 | |
3147 | /* |
3148 | * Re-enable the receiver filter. |
3149 | */ |
3150 | bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr); |
3151 | } |
3152 | |
3153 | /* |
3154 | * sip_dp83815_set_filter: |
3155 | * |
3156 | * Set up the receive filter. |
3157 | */ |
3158 | static void |
3159 | sipcom_dp83815_set_filter(struct sip_softc *sc) |
3160 | { |
3161 | bus_space_tag_t st = sc->sc_st; |
3162 | bus_space_handle_t sh = sc->sc_sh; |
3163 | struct ethercom *ec = &sc->sc_ethercom; |
3164 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
3165 | struct ether_multi *enm; |
3166 | const u_int8_t *cp; |
3167 | struct ether_multistep step; |
3168 | u_int32_t crc, hash, slot, bit; |
3169 | #define MCHASH_NWORDS_83820 128 |
3170 | #define MCHASH_NWORDS_83815 32 |
3171 | #define MCHASH_NWORDS MAX(MCHASH_NWORDS_83820, MCHASH_NWORDS_83815) |
3172 | u_int16_t mchash[MCHASH_NWORDS]; |
3173 | int i; |
3174 | |
3175 | /* |
3176 | * Initialize the prototype RFCR. |
3177 | * Enable the receive filter, and accept on |
3178 | * Perfect (destination address) Match |
3179 | * If IFF_BROADCAST, also accept all broadcast packets. |
3180 | * If IFF_PROMISC, accept all unicast packets (and later, set |
3181 | * IFF_ALLMULTI and accept all multicast, too). |
3182 | */ |
3183 | sc->sc_rfcr = RFCR_RFEN | RFCR_APM; |
3184 | if (ifp->if_flags & IFF_BROADCAST) |
3185 | sc->sc_rfcr |= RFCR_AAB; |
3186 | if (ifp->if_flags & IFF_PROMISC) { |
3187 | sc->sc_rfcr |= RFCR_AAP; |
3188 | goto allmulti; |
3189 | } |
3190 | |
3191 | /* |
3192 | * Set up the DP83820/DP83815 multicast address filter by |
3193 | * passing all multicast addresses through a CRC generator, |
3194 | * and then using the high-order 11/9 bits as an index into |
3195 | * the 2048/512 bit multicast hash table. The high-order |
3196 | * 7/5 bits select the slot, while the low-order 4 bits |
3197 | * select the bit within the slot. Note that only the low |
3198 | * 16-bits of each filter word are used, and there are |
3199 | * 128/32 filter words. |
3200 | */ |
3201 | |
3202 | memset(mchash, 0, sizeof(mchash)); |
3203 | |
3204 | ifp->if_flags &= ~IFF_ALLMULTI; |
3205 | ETHER_FIRST_MULTI(step, ec, enm); |
3206 | if (enm == NULL) |
3207 | goto setit; |
3208 | while (enm != NULL) { |
3209 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
3210 | /* |
3211 | * We must listen to a range of multicast addresses. |
3212 | * For now, just accept all multicasts, rather than |
3213 | * trying to set only those filter bits needed to match |
3214 | * the range. (At this time, the only use of address |
3215 | * ranges is for IP multicast routing, for which the |
3216 | * range is big enough to require all bits set.) |
3217 | */ |
3218 | goto allmulti; |
3219 | } |
3220 | |
3221 | crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); |
3222 | |
3223 | if (sc->sc_gigabit) { |
3224 | /* Just want the 11 most significant bits. */ |
3225 | hash = crc >> 21; |
3226 | } else { |
3227 | /* Just want the 9 most significant bits. */ |
3228 | hash = crc >> 23; |
3229 | } |
3230 | |
3231 | slot = hash >> 4; |
3232 | bit = hash & 0xf; |
3233 | |
3234 | /* Set the corresponding bit in the hash table. */ |
3235 | mchash[slot] |= 1 << bit; |
3236 | |
3237 | ETHER_NEXT_MULTI(step, enm); |
3238 | } |
3239 | sc->sc_rfcr |= RFCR_MHEN; |
3240 | goto setit; |
3241 | |
3242 | allmulti: |
3243 | ifp->if_flags |= IFF_ALLMULTI; |
3244 | sc->sc_rfcr |= RFCR_AAM; |
3245 | |
3246 | setit: |
3247 | #define FILTER_EMIT(addr, data) \ |
3248 | bus_space_write_4(st, sh, SIP_RFCR, (addr)); \ |
3249 | delay(1); \ |
3250 | bus_space_write_4(st, sh, SIP_RFDR, (data)); \ |
3251 | delay(1) |
3252 | |
3253 | /* |
3254 | * Disable receive filter, and program the node address. |
3255 | */ |
3256 | cp = CLLADDR(ifp->if_sadl); |
3257 | FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]); |
3258 | FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]); |
3259 | FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]); |
3260 | |
3261 | if ((ifp->if_flags & IFF_ALLMULTI) == 0) { |
3262 | int nwords = |
3263 | sc->sc_gigabit ? MCHASH_NWORDS_83820 : MCHASH_NWORDS_83815; |
3264 | /* |
3265 | * Program the multicast hash table. |
3266 | */ |
3267 | for (i = 0; i < nwords; i++) { |
3268 | FILTER_EMIT(sc->sc_parm->p_filtmem + (i * 2), mchash[i]); |
3269 | } |
3270 | } |
3271 | #undef FILTER_EMIT |
3272 | #undef MCHASH_NWORDS |
3273 | #undef MCHASH_NWORDS_83815 |
3274 | #undef MCHASH_NWORDS_83820 |
3275 | |
3276 | /* |
3277 | * Re-enable the receiver filter. |
3278 | */ |
3279 | bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr); |
3280 | } |
3281 | |
3282 | /* |
3283 | * sip_dp83820_mii_readreg: [mii interface function] |
3284 | * |
3285 | * Read a PHY register on the MII of the DP83820. |
3286 | */ |
static int
sipcom_dp83820_mii_readreg(device_t self, int phy, int reg)
{
	struct sip_softc *sc = device_private(self);

	/*
	 * In TBI (ten-bit interface) mode there is no external MII;
	 * emulate the standard MII registers using the chip's TBI
	 * register set instead of bit-banging.
	 */
	if (sc->sc_cfg & CFG_TBI_EN) {
		bus_addr_t tbireg;
		int rv;

		/* Only pseudo-PHY 0 exists in TBI mode. */
		if (phy != 0)
			return (0);

		switch (reg) {
		case MII_BMCR:		tbireg = SIP_TBICR; break;
		case MII_BMSR:		tbireg = SIP_TBISR; break;
		case MII_ANAR:		tbireg = SIP_TANAR; break;
		case MII_ANLPAR:	tbireg = SIP_TANLPAR; break;
		case MII_ANER:		tbireg = SIP_TANER; break;
		case MII_EXTSR:
			/*
			 * Don't even bother reading the TESR register.
			 * The manual documents that the device has
			 * 1000baseX full/half capability, but the
			 * register itself seems read back 0 on some
			 * boards.  Just hard-code the result.
			 */
			return (EXTSR_1000XFDX|EXTSR_1000XHDX);

		default:
			/* Unmapped registers read as 0. */
			return (0);
		}

		rv = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff;
		if (tbireg == SIP_TBISR) {
			/* LINK and ACOMP are switched! */
			int val = rv;

			rv = 0;
			if (val & TBISR_MR_LINK_STATUS)
				rv |= BMSR_LINK;
			if (val & TBISR_MR_AN_COMPLETE)
				rv |= BMSR_ACOMP;

			/*
			 * The manual claims this register reads back 0
			 * on hard and soft reset.  But we want to let
			 * the gentbi driver know that we support auto-
			 * negotiation, so hard-code this bit in the
			 * result.
			 */
			rv |= BMSR_ANEG | BMSR_EXTSTAT;
		}

		return (rv);
	}

	/* MII/GMII mode: perform the access via the bit-bang module. */
	return mii_bitbang_readreg(self, &sipcom_mii_bitbang_ops, phy, reg);
}
3345 | |
3346 | /* |
3347 | * sip_dp83820_mii_writereg: [mii interface function] |
3348 | * |
3349 | * Write a PHY register on the MII of the DP83820. |
3350 | */ |
3351 | static void |
3352 | sipcom_dp83820_mii_writereg(device_t self, int phy, int reg, int val) |
3353 | { |
3354 | struct sip_softc *sc = device_private(self); |
3355 | |
3356 | if (sc->sc_cfg & CFG_TBI_EN) { |
3357 | bus_addr_t tbireg; |
3358 | |
3359 | if (phy != 0) |
3360 | return; |
3361 | |
3362 | switch (reg) { |
3363 | case MII_BMCR: tbireg = SIP_TBICR; break; |
3364 | case MII_ANAR: tbireg = SIP_TANAR; break; |
3365 | case MII_ANLPAR: tbireg = SIP_TANLPAR; break; |
3366 | default: |
3367 | return; |
3368 | } |
3369 | |
3370 | bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val); |
3371 | return; |
3372 | } |
3373 | |
3374 | mii_bitbang_writereg(self, &sipcom_mii_bitbang_ops, phy, reg, val); |
3375 | } |
3376 | |
3377 | /* |
3378 | * sip_dp83820_mii_statchg: [mii interface function] |
3379 | * |
3380 | * Callback from MII layer when media changes. |
3381 | */ |
static void
sipcom_dp83820_mii_statchg(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	u_int32_t cfg, pcr;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * Update CFG for MII/GMII.
	 */
	if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
		cfg = sc->sc_cfg | CFG_MODE_1000;
	else
		cfg = sc->sc_cfg;

	/*
	 * 802.3x flow control.  If we may transmit PAUSE frames, program
	 * the previously-tuned RX FIFO thresholds; if we must honor
	 * received PAUSE frames, enable pause reception.
	 */
	pcr = 0;
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
			pcr |= sc->sc_rx_flow_thresh;
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			pcr |= PCR_PSEN | PCR_PS_MCAST;
	}

	/* Commit the updated configuration to the hardware. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
	    sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
	    sc->sc_rxcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PCR, pcr);
}
3441 | |
3442 | /* |
3443 | * sip_mii_bitbang_read: [mii bit-bang interface function] |
3444 | * |
3445 | * Read the MII serial port for the MII bit-bang module. |
3446 | */ |
3447 | static u_int32_t |
3448 | sipcom_mii_bitbang_read(device_t self) |
3449 | { |
3450 | struct sip_softc *sc = device_private(self); |
3451 | |
3452 | return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR)); |
3453 | } |
3454 | |
3455 | /* |
3456 | * sip_mii_bitbang_write: [mii big-bang interface function] |
3457 | * |
3458 | * Write the MII serial port for the MII bit-bang module. |
3459 | */ |
3460 | static void |
3461 | sipcom_mii_bitbang_write(device_t self, u_int32_t val) |
3462 | { |
3463 | struct sip_softc *sc = device_private(self); |
3464 | |
3465 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val); |
3466 | } |
3467 | |
3468 | /* |
3469 | * sip_sis900_mii_readreg: [mii interface function] |
3470 | * |
3471 | * Read a PHY register on the MII. |
3472 | */ |
static int
sipcom_sis900_mii_readreg(device_t self, int phy, int reg)
{
	struct sip_softc *sc = device_private(self);
	u_int32_t enphy;

	/*
	 * The PHY of recent SiS chipsets is accessed through bitbang
	 * operations.
	 */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900)
		return mii_bitbang_readreg(self, &sipcom_mii_bitbang_ops,
		    phy, reg);

#ifndef SIS900_MII_RESTRICT
	/*
	 * The SiS 900 has only an internal PHY on the MII.  Only allow
	 * MII address 0.
	 *
	 * NOTE(review): this guard appears unreachable -- the SIS_900
	 * product already returned via the bitbang path above, so the
	 * product cannot be PCI_PRODUCT_SIS_900 here.  Possibly the
	 * early return was meant to be qualified by chip revision;
	 * confirm against the driver's history before changing.
	 */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
		return (0);
#endif

	/*
	 * Enhanced-PHY access path: kick off the read and busy-wait for
	 * ENPHY_ACCESS to clear.  NOTE(review): no timeout -- a wedged
	 * chip would spin forever here.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
	    (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
	    ENPHY_RWCMD | ENPHY_ACCESS);
	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while (enphy & ENPHY_ACCESS);
	return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
}
3504 | |
3505 | /* |
3506 | * sip_sis900_mii_writereg: [mii interface function] |
3507 | * |
3508 | * Write a PHY register on the MII. |
3509 | */ |
static void
sipcom_sis900_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct sip_softc *sc = device_private(self);
	u_int32_t enphy;

	/* Recent SiS chipsets access the PHY via bitbang operations. */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900) {
		mii_bitbang_writereg(self, &sipcom_mii_bitbang_ops,
		    phy, reg, val);
		return;
	}

#ifndef SIS900_MII_RESTRICT
	/*
	 * The SiS 900 has only an internal PHY on the MII.  Only allow
	 * MII address 0.
	 *
	 * NOTE(review): this guard appears unreachable -- the SIS_900
	 * product already returned above; see the matching note in
	 * sipcom_sis900_mii_readreg().
	 */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
		return;
#endif

	/*
	 * Enhanced-PHY access path: start the write and busy-wait for
	 * ENPHY_ACCESS to clear (no timeout; see readreg note).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
	    (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
	    (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while (enphy & ENPHY_ACCESS);
}
3538 | |
3539 | /* |
3540 | * sip_sis900_mii_statchg: [mii interface function] |
3541 | * |
3542 | * Callback from MII layer when media changes. |
3543 | */ |
static void
sipcom_sis900_mii_statchg(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	u_int32_t flowctl;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * Update IMR for use of 802.3x flow control.  With flow control
	 * active we also want the pause start/end interrupts.
	 */
	if (sc->sc_flowflags & IFM_FLOW) {
		sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
		flowctl = FLOWCTL_FLOWEN;
	} else {
		sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
		flowctl = 0;
	}

	/* Commit the updated configuration to the hardware. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
	    sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
	    sc->sc_rxcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
}
3595 | |
3596 | /* |
3597 | * sip_dp83815_mii_readreg: [mii interface function] |
3598 | * |
3599 | * Read a PHY register on the MII. |
3600 | */ |
3601 | static int |
3602 | sipcom_dp83815_mii_readreg(device_t self, int phy, int reg) |
3603 | { |
3604 | struct sip_softc *sc = device_private(self); |
3605 | u_int32_t val; |
3606 | |
3607 | /* |
3608 | * The DP83815 only has an internal PHY. Only allow |
3609 | * MII address 0. |
3610 | */ |
3611 | if (phy != 0) |
3612 | return (0); |
3613 | |
3614 | /* |
3615 | * Apparently, after a reset, the DP83815 can take a while |
3616 | * to respond. During this recovery period, the BMSR returns |
3617 | * a value of 0. Catch this -- it's not supposed to happen |
3618 | * (the BMSR has some hardcoded-to-1 bits), and wait for the |
3619 | * PHY to come back to life. |
3620 | * |
3621 | * This works out because the BMSR is the first register |
3622 | * read during the PHY probe process. |
3623 | */ |
3624 | do { |
3625 | val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg)); |
3626 | } while (reg == MII_BMSR && val == 0); |
3627 | |
3628 | return (val & 0xffff); |
3629 | } |
3630 | |
3631 | /* |
3632 | * sip_dp83815_mii_writereg: [mii interface function] |
3633 | * |
3634 | * Write a PHY register to the MII. |
3635 | */ |
3636 | static void |
3637 | sipcom_dp83815_mii_writereg(device_t self, int phy, int reg, int val) |
3638 | { |
3639 | struct sip_softc *sc = device_private(self); |
3640 | |
3641 | /* |
3642 | * The DP83815 only has an internal PHY. Only allow |
3643 | * MII address 0. |
3644 | */ |
3645 | if (phy != 0) |
3646 | return; |
3647 | |
3648 | bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val); |
3649 | } |
3650 | |
3651 | /* |
3652 | * sip_dp83815_mii_statchg: [mii interface function] |
3653 | * |
3654 | * Callback from MII layer when media changes. |
3655 | */ |
static void
sipcom_dp83815_mii_statchg(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * XXX 802.3x flow control.
	 */

	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
	    sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
	    sc->sc_rxcfg);

	/*
	 * Some DP83815s experience problems when used with short
	 * (< 30m/100ft) Ethernet cables in 100BaseTX mode.  This
	 * sequence adjusts the DSP's signal attenuation to fix the
	 * problem.
	 *
	 * NOTE(review): registers 0x00cc/0x00f4/0x00fc are not named in
	 * this driver; presumably they are the undocumented DSP test/
	 * coefficient registers from the vendor's short-cable errata --
	 * confirm against the DP83815 errata before modifying.
	 */
	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		/* Unlock the DSP register page. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0x0001);

		reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
		reg &= 0x0fff;
		bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4, reg | 0x1000);
		delay(100);
		reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00fc);
		reg &= 0x00ff;
		/* Apply the workaround only when the coefficient is bad. */
		if ((reg & 0x0080) == 0 || (reg >= 0x00d8)) {
			bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00fc,
			    0x00e8);
			reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
			bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4,
			    reg | 0x20);
		}

		/* Re-lock the DSP register page. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0);
	}
}
3715 | |
3716 | static void |
3717 | sipcom_dp83820_read_macaddr(struct sip_softc *sc, |
3718 | const struct pci_attach_args *pa, u_int8_t *enaddr) |
3719 | { |
3720 | u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2]; |
3721 | u_int8_t cksum, *e, match; |
3722 | int i; |
3723 | |
3724 | /* |
3725 | * EEPROM data format for the DP83820 can be found in |
3726 | * the DP83820 manual, section 4.2.4. |
3727 | */ |
3728 | |
3729 | sipcom_read_eeprom(sc, 0, __arraycount(eeprom_data), eeprom_data); |
3730 | |
3731 | match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8; |
3732 | match = ~(match - 1); |
3733 | |
3734 | cksum = 0x55; |
3735 | e = (u_int8_t *) eeprom_data; |
3736 | for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++) |
3737 | cksum += *e++; |
3738 | |
3739 | if (cksum != match) |
3740 | printf("%s: Checksum (%x) mismatch (%x)" , |
3741 | device_xname(sc->sc_dev), cksum, match); |
3742 | |
3743 | enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff; |
3744 | enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8; |
3745 | enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff; |
3746 | enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8; |
3747 | enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff; |
3748 | enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8; |
3749 | } |
3750 | |
3751 | static void |
3752 | sipcom_sis900_eeprom_delay(struct sip_softc *sc) |
3753 | { |
3754 | int i; |
3755 | |
3756 | /* |
3757 | * FreeBSD goes from (300/33)+1 [10] to 0. There must be |
3758 | * a reason, but I don't know it. |
3759 | */ |
3760 | for (i = 0; i < 10; i++) |
3761 | bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR); |
3762 | } |
3763 | |
/*
 * sipcom_sis900_read_macaddr:
 *
 *	Read the Ethernet address on SiS 900 family parts.  The
 *	location of the address depends on the chip revision:
 *	integrated 630-series parts load it from NVRAM into the
 *	receive filter registers, the 960 shares its EEPROM with a
 *	1394 controller and must arbitrate for it, and everything
 *	else reads a dedicated EEPROM directly.  (pa is unused here.)
 */
static void
sipcom_sis900_read_macaddr(struct sip_softc *sc,
    const struct pci_attach_args *pa, u_int8_t *enaddr)
{
	u_int16_t myea[ETHER_ADDR_LEN / 2];

	switch (sc->sc_rev) {
	case SIS_REV_630S:
	case SIS_REV_630E:
	case SIS_REV_630EA1:
	case SIS_REV_630ET:
	case SIS_REV_635:
		/*
		 * The MAC address for the on-board Ethernet of
		 * the SiS 630 chipset is in the NVRAM.  Kick
		 * the chip into re-loading it from NVRAM, and
		 * read the MAC address out of the filter registers.
		 */
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);

		/*
		 * Select receive-filter words 0, 2 and 4 in turn; each
		 * RFDR read returns 16 bits of the station address.
		 */
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE0);
		myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE2);
		myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE4);
		myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;
		break;

	case SIS_REV_960:
	{
/* Set / clear bits in the EEPROM access register (read-modify-write). */
#define SIS_SET_EROMAR(x,y)	bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR,	\
				    bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) | (y))

#define SIS_CLR_EROMAR(x,y)	bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR,	\
				    bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) & ~(y))

		int waittime, i;

		/* Allow to read EEPROM from LAN. It is shared
		 * between a 1394 controller and the NIC and each
		 * time we access it, we need to set SIS_EECMD_REQ.
		 */
		SIS_SET_EROMAR(sc, EROMAR_REQ);

		for (waittime = 0; waittime < 1000; waittime++) { /* 1 ms max */
			/* Force EEPROM to idle state. */

			/*
			 * XXX-cube This is ugly.  I'll look for docs about it.
			 */
			/*
			 * Raise chip select, then clock out 26 dummy
			 * cycles to flush any half-finished command,
			 * then drop chip select again.
			 */
			SIS_SET_EROMAR(sc, EROMAR_EECS);
			sipcom_sis900_eeprom_delay(sc);
			for (i = 0; i <= 25; i++) { /* Yes, 26 times. */
				SIS_SET_EROMAR(sc, EROMAR_EESK);
				sipcom_sis900_eeprom_delay(sc);
				SIS_CLR_EROMAR(sc, EROMAR_EESK);
				sipcom_sis900_eeprom_delay(sc);
			}
			SIS_CLR_EROMAR(sc, EROMAR_EECS);
			sipcom_sis900_eeprom_delay(sc);
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, 0);

			/* Only read once the arbiter has granted us the bus. */
			if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR) & EROMAR_GNT) {
				sipcom_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
				    sizeof(myea) / sizeof(myea[0]), myea);
				break;
			}
			DELAY(1);
		}

		/*
		 * NOTE(review): if the grant loop above times out, myea
		 * is used uninitialized below — presumably a grant always
		 * arrives within 1 ms in practice; verify against hardware
		 * documentation.
		 */

		/*
		 * Set SIS_EECTL_CLK to high, so a other master
		 * can operate on the i2c bus.
		 */
		SIS_SET_EROMAR(sc, EROMAR_EESK);

		/* Refuse EEPROM access by LAN */
		SIS_SET_EROMAR(sc, EROMAR_DONE);
	} break;

	default:
		/* Plain SiS 900/7016: station address lives in the EEPROM. */
		sipcom_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
		    sizeof(myea) / sizeof(myea[0]), myea);
	}

	/* Unpack the three 16-bit words, low byte first, into enaddr. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
}
3864 | |
/*
 * Bit-reverse one octet: bbr4[] reverses a single nibble, and bbr()
 * swaps the two reversed nibbles to reverse the whole byte.
 */
static const uint8_t bbr4[] = {
	0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
	0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
};
#define bbr(v) ((bbr4[(v) & 0xf] << 4) | bbr4[((v) >> 4) & 0xf])
3868 | |
3869 | static void |
3870 | sipcom_dp83815_read_macaddr(struct sip_softc *sc, |
3871 | const struct pci_attach_args *pa, u_int8_t *enaddr) |
3872 | { |
3873 | u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea; |
3874 | u_int8_t cksum, *e, match; |
3875 | int i; |
3876 | |
3877 | sipcom_read_eeprom(sc, 0, sizeof(eeprom_data) / |
3878 | sizeof(eeprom_data[0]), eeprom_data); |
3879 | |
3880 | match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8; |
3881 | match = ~(match - 1); |
3882 | |
3883 | cksum = 0x55; |
3884 | e = (u_int8_t *) eeprom_data; |
3885 | for (i=0 ; i<SIP_DP83815_EEPROM_CHECKSUM ; i++) { |
3886 | cksum += *e++; |
3887 | } |
3888 | if (cksum != match) { |
3889 | printf("%s: Checksum (%x) mismatch (%x)" , |
3890 | device_xname(sc->sc_dev), cksum, match); |
3891 | } |
3892 | |
3893 | /* |
3894 | * Unrolled because it makes slightly more sense this way. |
3895 | * The DP83815 stores the MAC address in bit 0 of word 6 |
3896 | * through bit 15 of word 8. |
3897 | */ |
3898 | ea = &eeprom_data[6]; |
3899 | enaddr[0] = ((*ea & 0x1) << 7); |
3900 | ea++; |
3901 | enaddr[0] |= ((*ea & 0xFE00) >> 9); |
3902 | enaddr[1] = ((*ea & 0x1FE) >> 1); |
3903 | enaddr[2] = ((*ea & 0x1) << 7); |
3904 | ea++; |
3905 | enaddr[2] |= ((*ea & 0xFE00) >> 9); |
3906 | enaddr[3] = ((*ea & 0x1FE) >> 1); |
3907 | enaddr[4] = ((*ea & 0x1) << 7); |
3908 | ea++; |
3909 | enaddr[4] |= ((*ea & 0xFE00) >> 9); |
3910 | enaddr[5] = ((*ea & 0x1FE) >> 1); |
3911 | |
3912 | /* |
3913 | * In case that's not weird enough, we also need to reverse |
3914 | * the bits in each byte. This all actually makes more sense |
3915 | * if you think about the EEPROM storage as an array of bits |
3916 | * being shifted into bytes, but that's not how we're looking |
3917 | * at it here... |
3918 | */ |
3919 | for (i = 0; i < 6 ;i++) |
3920 | enaddr[i] = bbr(enaddr[i]); |
3921 | } |
3922 | |
3923 | /* |
3924 | * sip_mediastatus: [ifmedia interface function] |
3925 | * |
3926 | * Get the current interface media status. |
3927 | */ |
3928 | static void |
3929 | sipcom_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
3930 | { |
3931 | struct sip_softc *sc = ifp->if_softc; |
3932 | |
3933 | if (!device_is_active(sc->sc_dev)) { |
3934 | ifmr->ifm_active = IFM_ETHER | IFM_NONE; |
3935 | ifmr->ifm_status = 0; |
3936 | return; |
3937 | } |
3938 | ether_mediastatus(ifp, ifmr); |
3939 | ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) | |
3940 | sc->sc_flowflags; |
3941 | } |
3942 | |