/* $NetBSD: if_wm.c,v 1.453 2016/11/21 03:57:37 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.453 2016/11/21 03:57:37 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	__BIT(0)
#define WM_DEBUG_TX	__BIT(1)
#define WM_DEBUG_RX	__BIT(2)
#define WM_DEBUG_GMII	__BIT(3)
#define WM_DEBUG_MANAGE	__BIT(4)
#define WM_DEBUG_NVM	__BIT(5)
#define WM_DEBUG_INIT	__BIT(6)
#define WM_DEBUG_LOCK	__BIT(7)
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Maximum number of interrupts for this driver: one per Tx/Rx queue
 * pair plus one for link status changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
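
/*
 * Illustrative sketch, not driver code: because WM_NTXDESC(txq) and
 * WM_TXQUEUELEN(txq) are powers of two, the "& mask" in WM_NEXTTX()
 * and WM_NEXTTXS() wraps a ring index without any division.
 */
#if 0
	/* With WM_NTXDESC(txq) == 256, index 255 wraps back to 0: */
	int i = WM_NEXTTX(txq, 255);	/* (255 + 1) & 255 == 0 */

	/* Walking every descriptor of one job, wrapping as needed: */
	for (i = txs->txs_firstdesc;; i = WM_NEXTTX(txq, i)) {
		/* ... process txq->txq_descs[i] ... */
		if (i == txs->txs_lastdesc)
			break;
	}
#endif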

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
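
/*
 * Worked example of the sizing above (illustrative): a full-sized
 * ~9018 byte jumbo frame spans howmany(9018, MCLBYTES) == 5 of the
 * 2k buffers, so WM_NRXDESC / 5 == 51 descriptors' worth -- i.e.
 * roughly 50 jumbo packets can be outstanding at once.
 */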

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(x)		(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname) \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
	do { \
		snprintf((q)->qname##_##evname##_evcnt_name, \
		    sizeof((q)->qname##_##evname##_evcnt_name), \
		    "%s%02d%s", #qname, (qnum), #evname); \
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
		    (evtype), NULL, (xname), \
		    (q)->qname##_##evname##_evcnt_name); \
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */
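
/*
 * Illustrative expansion, not driver code: for qname == txq,
 * evname == txdw and qnum == 0, WM_Q_EVCNT_ATTACH() pastes tokens
 * into roughly the following (xname is the device's name string):
 */
#if 0
	snprintf(txq->txq_txdw_evcnt_name,
	    sizeof(txq->txq_txdw_evcnt_name), "%s%02d%s", "txq", 0, "txdw");
	evcnt_attach_dynamic(&txq->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
	    xname, txq->txq_txdw_evcnt_name);
#endif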

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define txq_descs	txq_descs_u->sctxu_txdescs
#define txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq mediates between them without
	 * blocking; see the usage sketch after this struct.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	 /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	 /* Tx queue empty interrupts */
					 /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)	 /* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)	 /* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)	 /* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)	 /* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)	 /* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)	 /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};
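
/*
 * Usage sketch for txq_interq (illustrative, not the actual transmit
 * path): any CPU may enqueue an mbuf with pcq_put(9); the CPU that
 * owns the Tx queue later drains it with pcq_get(9), so producers
 * never block on the consumer.
 */
#if 0
	/* Producer side: */
	if (!pcq_put(txq->txq_interq, m)) {
		m_freem(m);		/* queue full; drop the packet */
		return ENOBUFS;
	}

	/* Consumer side (Tx queue owner): */
	struct mbuf *m0;
	while ((m0 = pcq_get(txq->txq_interq)) != NULL) {
		/* ... load m0 onto the hardware descriptor ring ... */
	}
#endif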

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);	/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of Tx and Rx queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define WM_MEDIATYPE_UNKNOWN		0x00
#define WM_MEDIATYPE_FIBER		0x01
#define WM_MEDIATYPE_COPPER		0x02
#define WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * Legacy and MSI use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* Legacy and MSI use sc_intrs[0]. */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
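
/*
 * Typical pattern (illustrative): core softc state is only touched
 * between WM_CORE_LOCK()/WM_CORE_UNLOCK(), and lock invariants are
 * asserted with WM_CORE_LOCKED().  All three degrade gracefully when
 * sc_core_lock was never created (non-MPSAFE attach).
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify sc_flags, sc_if_flags, etc. ... */
	WM_CORE_UNLOCK(sc);
#endif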

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define WM_RXCHAIN_RESET(rxq) \
do { \
	(rxq)->rxq_tailp = &(rxq)->rxq_head; \
	*(rxq)->rxq_tailp = NULL; \
	(rxq)->rxq_len = 0; \
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m) \
do { \
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
	(rxq)->rxq_tailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
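
/*
 * Illustrative sketch, not driver code: rxq_tailp always points at
 * the m_next slot of the last mbuf (or at rxq_head when the chain is
 * empty), so linking another Rx buffer is O(1) with no list walk.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* head == NULL, tailp == &head */
	WM_RXCHAIN_LINK(rxq, m1);	/* head == m1, tailp == &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, tailp == &m2->m_next */
#endif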

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname) \
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val) \
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)
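
/*
 * Example of the intended CSR_WRITE_FLUSH() pattern (illustrative,
 * assuming the WMREG_CTRL/CTRL_RST names from if_wmreg.h): reading
 * back WMREG_STATUS pushes any posted PCI writes out to the device
 * before a delay or a dependent access.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* make sure the reset reached the chip */
	delay(10000);
#endif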

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x) \
	(sizeof(bus_addr_t) == 8 ? \
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x) \
	(sizeof(bus_addr_t) == 8 ? \
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
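
/*
 * Illustrative only: the _LO/_HI helpers split a descriptor-ring DMA
 * address into the two 32-bit halves the hardware wants; on a 32-bit
 * bus_addr_t the high half is simply 0.  wm_set_dma_addr(), declared
 * below, does the equivalent in one call (assuming the wtx_addr field
 * from if_wmreg.h):
 */
#if 0
	wm_set_dma_addr(&txq->txq_descs[0].wtx_addr, WM_CDTXADDR(txq, 0));
#endif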

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);   /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);	    /* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device name */
	wm_chip_type		wmp_type;	/* chip type */
	uint32_t		wmp_flags;	/* flags */
#define WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1389 | "I218 LM Ethernet Connection" , |
1390 | WM_T_PCH_LPT, WMP_F_COPPER }, |
1391 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2, |
1392 | "I218 LM Ethernet Connection" , |
1393 | WM_T_PCH_LPT, WMP_F_COPPER }, |
1394 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3, |
1395 | "I218 LM Ethernet Connection" , |
1396 | WM_T_PCH_LPT, WMP_F_COPPER }, |
1397 | #if 0 |
1398 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V, |
1399 | "I219 V Ethernet Connection" , |
1400 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1401 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2, |
1402 | "I219 V Ethernet Connection" , |
1403 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1404 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4, |
1405 | "I219 V Ethernet Connection" , |
1406 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1407 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5, |
1408 | "I219 V Ethernet Connection" , |
1409 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1410 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM, |
1411 | "I219 LM Ethernet Connection" , |
1412 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1413 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2, |
1414 | "I219 LM Ethernet Connection" , |
1415 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1416 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3, |
1417 | "I219 LM Ethernet Connection" , |
1418 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1419 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4, |
1420 | "I219 LM Ethernet Connection" , |
1421 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1422 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5, |
1423 | "I219 LM Ethernet Connection" , |
1424 | WM_T_PCH_SPT, WMP_F_COPPER }, |
1425 | #endif |
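	/* Sentinel entry: wm_lookup() stops at the NULL name. */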
1426 | { 0, 0, |
1427 | NULL, |
1428 | 0, 0 }, |
1429 | }; |
1430 | |
1431 | /* |
1432 | * Register read/write functions. |
1433 | * Other than CSR_{READ|WRITE}(). |
1434 | */ |
1435 | |
1436 | #if 0 /* Not currently used */ |
1437 | static inline uint32_t |
1438 | wm_io_read(struct wm_softc *sc, int reg) |
1439 | { |
1440 | |
1441 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); |
1442 | return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); |
1443 | } |
1444 | #endif |
1445 | |
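/*
 * Indirect register access through the I/O BAR: a write to offset 0
 * selects the register and offset 4 carries its data, so each access
 * is a two-step operation (for wm_io_read() above, a write followed
 * by a read).
 */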
1446 | static inline void |
1447 | wm_io_write(struct wm_softc *sc, int reg, uint32_t val) |
1448 | { |
1449 | |
1450 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); |
1451 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); |
1452 | } |
1453 | |
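/*
 * Write an 8-bit value to one of the 82575's 8-bit controller
 * registers: the data and target offset are packed into a single
 * register write, after which we poll SCTL_CTL_READY for up to
 * SCTL_CTL_POLL_TIMEOUT iterations of 5us each.
 */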
1454 | static inline void |
1455 | wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off, |
1456 | uint32_t data) |
1457 | { |
1458 | uint32_t regval; |
1459 | int i; |
1460 | |
1461 | regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT); |
1462 | |
1463 | CSR_WRITE(sc, reg, regval); |
1464 | |
1465 | for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) { |
1466 | delay(5); |
1467 | if (CSR_READ(sc, reg) & SCTL_CTL_READY) |
1468 | break; |
1469 | } |
1470 | if (i == SCTL_CTL_POLL_TIMEOUT) { |
1471 | aprint_error("%s: WARNING:" |
1472 | " i82575 reg 0x%08x setup did not indicate ready\n" , |
1473 | device_xname(sc->sc_dev), reg); |
1474 | } |
1475 | } |
1476 | |
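/*
 * Split a bus address into the two 32-bit halves of a wiseman address;
 * for example, 0x123456789 becomes wa_low 0x23456789 and wa_high 0x1.
 * On platforms with a 32-bit bus_addr_t the high word is simply zeroed.
 */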
1477 | static inline void |
1478 | wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) |
1479 | { |
1480 | wa->wa_low = htole32(v & 0xffffffffU); |
1481 | if (sizeof(bus_addr_t) == 8) |
1482 | wa->wa_high = htole32((uint64_t) v >> 32); |
1483 | else |
1484 | wa->wa_high = 0; |
1485 | } |
1486 | |
1487 | /* |
1488 | * Descriptor sync/init functions. |
1489 | */ |
1490 | static inline void |
1491 | wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops) |
1492 | { |
1493 | struct wm_softc *sc = txq->txq_sc; |
1494 | |
1495 | /* If it will wrap around, sync to the end of the ring. */ |
1496 | if ((start + num) > WM_NTXDESC(txq)) { |
1497 | bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, |
1498 | WM_CDTXOFF(txq, start), txq->txq_descsize * |
1499 | (WM_NTXDESC(txq) - start), ops); |
1500 | num -= (WM_NTXDESC(txq) - start); |
1501 | start = 0; |
1502 | } |
1503 | |
1504 | /* Now sync whatever is left. */ |
1505 | bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, |
1506 | WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops); |
1507 | } |
1508 | |
1509 | static inline void |
1510 | wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops) |
1511 | { |
1512 | struct wm_softc *sc = rxq->rxq_sc; |
1513 | |
1514 | bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap, |
1515 | WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops); |
1516 | } |
1517 | |
1518 | static inline void |
1519 | wm_init_rxdesc(struct wm_rxqueue *rxq, int start) |
1520 | { |
1521 | struct wm_softc *sc = rxq->rxq_sc; |
1522 | struct wm_rxsoft *rxs = &rxq->rxq_soft[start]; |
1523 | wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start]; |
1524 | struct mbuf *m = rxs->rxs_mbuf; |
1525 | |
1526 | /* |
1527 | * Note: We scoot the packet forward 2 bytes in the buffer |
1528 | * so that the payload after the Ethernet header is aligned |
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
1532 | * The stupid chip uses the same size for every buffer, which |
1533 | * is set in the Receive Control register. We are using the 2K |
1534 | * size option, but what we REALLY want is (2K - 2)! For this |
1535 | * reason, we can't "scoot" packets longer than the standard |
1536 | * Ethernet MTU. On strict-alignment platforms, if the total |
1537 | * size exceeds (2K - 2) we set align_tweak to 0 and let |
1538 | * the upper layer copy the headers. |
1539 | */ |
1540 | m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak; |
1541 | |
1542 | wm_set_dma_addr(&rxd->wrx_addr, |
1543 | rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); |
1544 | rxd->wrx_len = 0; |
1545 | rxd->wrx_cksum = 0; |
1546 | rxd->wrx_status = 0; |
1547 | rxd->wrx_errors = 0; |
1548 | rxd->wrx_special = 0; |
1549 | wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1550 | |
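	/* Pass ownership to the chip by advancing the Rx ring tail. */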
1551 | CSR_WRITE(sc, rxq->rxq_rdt_reg, start); |
1552 | } |
1553 | |
1554 | /* |
1555 | * Device driver interface functions and commonly used functions. |
1556 | * match, attach, detach, init, start, stop, ioctl, watchdog and so on. |
1557 | */ |
1558 | |
/* Look up a device in the supported device table */
1560 | static const struct wm_product * |
1561 | wm_lookup(const struct pci_attach_args *pa) |
1562 | { |
1563 | const struct wm_product *wmp; |
1564 | |
1565 | for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { |
1566 | if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && |
1567 | PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) |
1568 | return wmp; |
1569 | } |
1570 | return NULL; |
1571 | } |
1572 | |
1573 | /* The match function (ca_match) */ |
1574 | static int |
1575 | wm_match(device_t parent, cfdata_t cf, void *aux) |
1576 | { |
1577 | struct pci_attach_args *pa = aux; |
1578 | |
1579 | if (wm_lookup(pa) != NULL) |
1580 | return 1; |
1581 | |
1582 | return 0; |
1583 | } |
1584 | |
1585 | /* The attach function (ca_attach) */ |
1586 | static void |
1587 | wm_attach(device_t parent, device_t self, void *aux) |
1588 | { |
1589 | struct wm_softc *sc = device_private(self); |
1590 | struct pci_attach_args *pa = aux; |
1591 | prop_dictionary_t dict; |
1592 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1593 | pci_chipset_tag_t pc = pa->pa_pc; |
1594 | int counts[PCI_INTR_TYPE_SIZE]; |
1595 | pci_intr_type_t max_type; |
1596 | const char *eetype, *xname; |
1597 | bus_space_tag_t memt; |
1598 | bus_space_handle_t memh; |
1599 | bus_size_t memsize; |
1600 | int memh_valid; |
1601 | int i, error; |
1602 | const struct wm_product *wmp; |
1603 | prop_data_t ea; |
1604 | prop_number_t pn; |
1605 | uint8_t enaddr[ETHER_ADDR_LEN]; |
1606 | uint16_t cfg1, cfg2, swdpin, nvmword; |
1607 | pcireg_t preg, memtype; |
1608 | uint16_t eeprom_data, apme_mask; |
1609 | bool force_clear_smbi; |
1610 | uint32_t link_mode; |
1611 | uint32_t reg; |
1612 | |
1613 | sc->sc_dev = self; |
1614 | callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS); |
1615 | sc->sc_core_stopping = false; |
1616 | |
1617 | wmp = wm_lookup(pa); |
1618 | #ifdef DIAGNOSTIC |
1619 | if (wmp == NULL) { |
1620 | printf("\n" ); |
1621 | panic("wm_attach: impossible" ); |
1622 | } |
1623 | #endif |
1624 | sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags); |
1625 | |
1626 | sc->sc_pc = pa->pa_pc; |
1627 | sc->sc_pcitag = pa->pa_tag; |
1628 | |
1629 | if (pci_dma64_available(pa)) |
1630 | sc->sc_dmat = pa->pa_dmat64; |
1631 | else |
1632 | sc->sc_dmat = pa->pa_dmat; |
1633 | |
1634 | sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id); |
1635 | sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG)); |
1636 | pci_aprint_devinfo_fancy(pa, "Ethernet controller" , wmp->wmp_name, 1); |
1637 | |
1638 | sc->sc_type = wmp->wmp_type; |
1639 | |
1640 | /* Set default function pointers */ |
1641 | sc->phy.acquire = wm_get_null; |
1642 | sc->phy.release = wm_put_null; |
1643 | sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000; |
1644 | |
1645 | if (sc->sc_type < WM_T_82543) { |
1646 | if (sc->sc_rev < 2) { |
1647 | aprint_error_dev(sc->sc_dev, |
1648 | "i82542 must be at least rev. 2\n" ); |
1649 | return; |
1650 | } |
1651 | if (sc->sc_rev < 3) |
1652 | sc->sc_type = WM_T_82542_2_0; |
1653 | } |
1654 | |
1655 | /* |
1656 | * Disable MSI for Errata: |
1657 | * "Message Signaled Interrupt Feature May Corrupt Write Transactions" |
1658 | * |
1659 | * 82544: Errata 25 |
1660 | * 82540: Errata 6 (easy to reproduce device timeout) |
1661 | * 82545: Errata 4 (easy to reproduce device timeout) |
1662 | * 82546: Errata 26 (easy to reproduce device timeout) |
1663 | * 82541: Errata 7 (easy to reproduce device timeout) |
1664 | * |
1665 | * "Byte Enables 2 and 3 are not set on MSI writes" |
1666 | * |
1667 | * 82571 & 82572: Errata 63 |
1668 | */ |
1669 | if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571) |
1670 | || (sc->sc_type == WM_T_82572)) |
1671 | pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY; |
1672 | |
1673 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
1674 | || (sc->sc_type == WM_T_82580) |
1675 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
1676 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) |
1677 | sc->sc_flags |= WM_F_NEWQUEUE; |
1678 | |
1679 | /* Set device properties (mactype) */ |
1680 | dict = device_properties(sc->sc_dev); |
1681 | prop_dictionary_set_uint32(dict, "mactype" , sc->sc_type); |
1682 | |
1683 | /* |
1684 | * Map the device. All devices support memory-mapped acccess, |
1685 | * and it is really required for normal operation. |
1686 | */ |
1687 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA); |
1688 | switch (memtype) { |
1689 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
1690 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
1691 | memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, |
1692 | memtype, 0, &memt, &memh, NULL, &memsize) == 0); |
1693 | break; |
1694 | default: |
1695 | memh_valid = 0; |
1696 | break; |
1697 | } |
1698 | |
1699 | if (memh_valid) { |
1700 | sc->sc_st = memt; |
1701 | sc->sc_sh = memh; |
1702 | sc->sc_ss = memsize; |
1703 | } else { |
1704 | aprint_error_dev(sc->sc_dev, |
1705 | "unable to map device registers\n" ); |
1706 | return; |
1707 | } |
1708 | |
1709 | /* |
1710 | * In addition, i82544 and later support I/O mapped indirect |
1711 | * register access. It is not desirable (nor supported in |
1712 | * this driver) to use it for normal operation, though it is |
1713 | * required to work around bugs in some chip versions. |
1714 | */ |
1715 | if (sc->sc_type >= WM_T_82544) { |
1716 | /* First we have to find the I/O BAR. */ |
1717 | for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) { |
1718 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i); |
1719 | if (memtype == PCI_MAPREG_TYPE_IO) |
1720 | break; |
1721 | if (PCI_MAPREG_MEM_TYPE(memtype) == |
1722 | PCI_MAPREG_MEM_TYPE_64BIT) |
1723 | i += 4; /* skip high bits, too */ |
1724 | } |
1725 | if (i < PCI_MAPREG_END) { |
1726 | /* |
1727 | * We found PCI_MAPREG_TYPE_IO. Note that 82580 |
1728 | * (and newer?) chip has no PCI_MAPREG_TYPE_IO. |
1729 | * It's no problem because newer chips has no this |
1730 | * bug. |
1731 | * |
1732 | * The i8254x doesn't apparently respond when the |
1733 | * I/O BAR is 0, which looks somewhat like it's not |
1734 | * been configured. |
1735 | */ |
1736 | preg = pci_conf_read(pc, pa->pa_tag, i); |
1737 | if (PCI_MAPREG_MEM_ADDR(preg) == 0) { |
1738 | aprint_error_dev(sc->sc_dev, |
1739 | "WARNING: I/O BAR at zero.\n" ); |
1740 | } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, |
1741 | 0, &sc->sc_iot, &sc->sc_ioh, |
1742 | NULL, &sc->sc_ios) == 0) { |
1743 | sc->sc_flags |= WM_F_IOH_VALID; |
1744 | } else { |
1745 | aprint_error_dev(sc->sc_dev, |
1746 | "WARNING: unable to map I/O space\n" ); |
1747 | } |
1748 | } |
1749 | |
1750 | } |
1751 | |
1752 | /* Enable bus mastering. Disable MWI on the i82542 2.0. */ |
1753 | preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
1754 | preg |= PCI_COMMAND_MASTER_ENABLE; |
1755 | if (sc->sc_type < WM_T_82542_2_1) |
1756 | preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; |
1757 | pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); |
1758 | |
1759 | /* power up chip */ |
1760 | if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, |
1761 | NULL)) && error != EOPNOTSUPP) { |
1762 | aprint_error_dev(sc->sc_dev, "cannot activate %d\n" , error); |
1763 | return; |
1764 | } |
1765 | |
1766 | wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag)); |
1767 | |
1768 | /* Allocation settings */ |
1769 | max_type = PCI_INTR_TYPE_MSIX; |
1770 | counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1; |
1771 | counts[PCI_INTR_TYPE_MSI] = 1; |
1772 | counts[PCI_INTR_TYPE_INTX] = 1; |
1773 | |
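	/*
	 * Try interrupt types from most to least preferred: MSI-X, then
	 * MSI, then INTx.  On setup failure the allocated vectors are
	 * released, max_type is lowered and we retry from here.
	 */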
1774 | alloc_retry: |
1775 | if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) { |
1776 | aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n" ); |
1777 | return; |
1778 | } |
1779 | |
1780 | if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) { |
1781 | error = wm_setup_msix(sc); |
1782 | if (error) { |
1783 | pci_intr_release(pc, sc->sc_intrs, |
1784 | counts[PCI_INTR_TYPE_MSIX]); |
1785 | |
1786 | /* Setup for MSI: Disable MSI-X */ |
1787 | max_type = PCI_INTR_TYPE_MSI; |
1788 | counts[PCI_INTR_TYPE_MSI] = 1; |
1789 | counts[PCI_INTR_TYPE_INTX] = 1; |
1790 | goto alloc_retry; |
1791 | } |
1792 | } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { |
1793 | wm_adjust_qnum(sc, 0); /* must not use multiqueue */ |
1794 | error = wm_setup_legacy(sc); |
1795 | if (error) { |
1796 | pci_intr_release(sc->sc_pc, sc->sc_intrs, |
1797 | counts[PCI_INTR_TYPE_MSI]); |
1798 | |
1799 | /* The next try is for INTx: Disable MSI */ |
1800 | max_type = PCI_INTR_TYPE_INTX; |
1801 | counts[PCI_INTR_TYPE_INTX] = 1; |
1802 | goto alloc_retry; |
1803 | } |
1804 | } else { |
1805 | wm_adjust_qnum(sc, 0); /* must not use multiqueue */ |
1806 | error = wm_setup_legacy(sc); |
1807 | if (error) { |
1808 | pci_intr_release(sc->sc_pc, sc->sc_intrs, |
1809 | counts[PCI_INTR_TYPE_INTX]); |
1810 | return; |
1811 | } |
1812 | } |
1813 | |
1814 | /* |
1815 | * Check the function ID (unit number of the chip). |
1816 | */ |
1817 | if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3) |
1818 | || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) |
1819 | || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
1820 | || (sc->sc_type == WM_T_82580) |
1821 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) |
1822 | sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS) |
1823 | >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK; |
1824 | else |
1825 | sc->sc_funcid = 0; |
1826 | |
1827 | /* |
1828 | * Determine a few things about the bus we're connected to. |
1829 | */ |
1830 | if (sc->sc_type < WM_T_82543) { |
1831 | /* We don't really know the bus characteristics here. */ |
1832 | sc->sc_bus_speed = 33; |
1833 | } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { |
1834 | /* |
1835 | * CSA (Communication Streaming Architecture) is about as fast |
1836 | * a 32-bit 66MHz PCI Bus. |
1837 | */ |
1838 | sc->sc_flags |= WM_F_CSA; |
1839 | sc->sc_bus_speed = 66; |
1840 | aprint_verbose_dev(sc->sc_dev, |
1841 | "Communication Streaming Architecture\n" ); |
1842 | if (sc->sc_type == WM_T_82547) { |
1843 | callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS); |
1844 | callout_setfunc(&sc->sc_txfifo_ch, |
1845 | wm_82547_txfifo_stall, sc); |
1846 | aprint_verbose_dev(sc->sc_dev, |
1847 | "using 82547 Tx FIFO stall work-around\n" ); |
1848 | } |
1849 | } else if (sc->sc_type >= WM_T_82571) { |
1850 | sc->sc_flags |= WM_F_PCIE; |
1851 | if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) |
1852 | && (sc->sc_type != WM_T_ICH10) |
1853 | && (sc->sc_type != WM_T_PCH) |
1854 | && (sc->sc_type != WM_T_PCH2) |
1855 | && (sc->sc_type != WM_T_PCH_LPT) |
1856 | && (sc->sc_type != WM_T_PCH_SPT)) { |
1857 | /* ICH* and PCH* have no PCIe capability registers */ |
1858 | if (pci_get_capability(pa->pa_pc, pa->pa_tag, |
1859 | PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff, |
1860 | NULL) == 0) |
1861 | aprint_error_dev(sc->sc_dev, |
1862 | "unable to find PCIe capability\n" ); |
1863 | } |
1864 | aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n" ); |
1865 | } else { |
1866 | reg = CSR_READ(sc, WMREG_STATUS); |
1867 | if (reg & STATUS_BUS64) |
1868 | sc->sc_flags |= WM_F_BUS64; |
1869 | if ((reg & STATUS_PCIX_MODE) != 0) { |
1870 | pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; |
1871 | |
1872 | sc->sc_flags |= WM_F_PCIX; |
1873 | if (pci_get_capability(pa->pa_pc, pa->pa_tag, |
1874 | PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0) |
1875 | aprint_error_dev(sc->sc_dev, |
1876 | "unable to find PCIX capability\n" ); |
1877 | else if (sc->sc_type != WM_T_82545_3 && |
1878 | sc->sc_type != WM_T_82546_3) { |
1879 | /* |
1880 | * Work around a problem caused by the BIOS |
1881 | * setting the max memory read byte count |
1882 | * incorrectly. |
1883 | */ |
1884 | pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, |
1885 | sc->sc_pcixe_capoff + PCIX_CMD); |
1886 | pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, |
1887 | sc->sc_pcixe_capoff + PCIX_STATUS); |
1888 | |
1889 | bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >> |
1890 | PCIX_CMD_BYTECNT_SHIFT; |
1891 | maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >> |
1892 | PCIX_STATUS_MAXB_SHIFT; |
1893 | if (bytecnt > maxb) { |
1894 | aprint_verbose_dev(sc->sc_dev, |
1895 | "resetting PCI-X MMRBC: %d -> %d\n" , |
1896 | 512 << bytecnt, 512 << maxb); |
1897 | pcix_cmd = (pcix_cmd & |
1898 | ~PCIX_CMD_BYTECNT_MASK) | |
1899 | (maxb << PCIX_CMD_BYTECNT_SHIFT); |
1900 | pci_conf_write(pa->pa_pc, pa->pa_tag, |
1901 | sc->sc_pcixe_capoff + PCIX_CMD, |
1902 | pcix_cmd); |
1903 | } |
1904 | } |
1905 | } |
1906 | /* |
1907 | * The quad port adapter is special; it has a PCIX-PCIX |
1908 | * bridge on the board, and can run the secondary bus at |
1909 | * a higher speed. |
1910 | */ |
1911 | if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { |
1912 | sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 |
1913 | : 66; |
1914 | } else if (sc->sc_flags & WM_F_PCIX) { |
1915 | switch (reg & STATUS_PCIXSPD_MASK) { |
1916 | case STATUS_PCIXSPD_50_66: |
1917 | sc->sc_bus_speed = 66; |
1918 | break; |
1919 | case STATUS_PCIXSPD_66_100: |
1920 | sc->sc_bus_speed = 100; |
1921 | break; |
1922 | case STATUS_PCIXSPD_100_133: |
1923 | sc->sc_bus_speed = 133; |
1924 | break; |
1925 | default: |
1926 | aprint_error_dev(sc->sc_dev, |
1927 | "unknown PCIXSPD %d; assuming 66MHz\n" , |
1928 | reg & STATUS_PCIXSPD_MASK); |
1929 | sc->sc_bus_speed = 66; |
1930 | break; |
1931 | } |
1932 | } else |
1933 | sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; |
1934 | aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n" , |
1935 | (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, |
1936 | (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI" ); |
1937 | } |
1938 | |
1939 | /* clear interesting stat counters */ |
1940 | CSR_READ(sc, WMREG_COLC); |
1941 | CSR_READ(sc, WMREG_RXERRC); |
1942 | |
1943 | if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583) |
1944 | || (sc->sc_type >= WM_T_ICH8)) |
1945 | sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
1946 | if (sc->sc_type >= WM_T_ICH8) |
1947 | sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
1948 | |
1949 | /* Set PHY, NVM mutex related stuff */ |
1950 | switch (sc->sc_type) { |
1951 | case WM_T_82542_2_0: |
1952 | case WM_T_82542_2_1: |
1953 | case WM_T_82543: |
1954 | case WM_T_82544: |
1955 | /* Microwire */ |
1956 | sc->sc_nvm_wordsize = 64; |
1957 | sc->sc_nvm_addrbits = 6; |
1958 | break; |
1959 | case WM_T_82540: |
1960 | case WM_T_82545: |
1961 | case WM_T_82545_3: |
1962 | case WM_T_82546: |
1963 | case WM_T_82546_3: |
1964 | /* Microwire */ |
1965 | reg = CSR_READ(sc, WMREG_EECD); |
1966 | if (reg & EECD_EE_SIZE) { |
1967 | sc->sc_nvm_wordsize = 256; |
1968 | sc->sc_nvm_addrbits = 8; |
1969 | } else { |
1970 | sc->sc_nvm_wordsize = 64; |
1971 | sc->sc_nvm_addrbits = 6; |
1972 | } |
1973 | sc->sc_flags |= WM_F_LOCK_EECD; |
1974 | break; |
1975 | case WM_T_82541: |
1976 | case WM_T_82541_2: |
1977 | case WM_T_82547: |
1978 | case WM_T_82547_2: |
1979 | sc->sc_flags |= WM_F_LOCK_EECD; |
1980 | reg = CSR_READ(sc, WMREG_EECD); |
1981 | if (reg & EECD_EE_TYPE) { |
1982 | /* SPI */ |
1983 | sc->sc_flags |= WM_F_EEPROM_SPI; |
1984 | wm_nvm_set_addrbits_size_eecd(sc); |
1985 | } else { |
1986 | /* Microwire */ |
1987 | if ((reg & EECD_EE_ABITS) != 0) { |
1988 | sc->sc_nvm_wordsize = 256; |
1989 | sc->sc_nvm_addrbits = 8; |
1990 | } else { |
1991 | sc->sc_nvm_wordsize = 64; |
1992 | sc->sc_nvm_addrbits = 6; |
1993 | } |
1994 | } |
1995 | break; |
1996 | case WM_T_82571: |
1997 | case WM_T_82572: |
1998 | /* SPI */ |
1999 | sc->sc_flags |= WM_F_EEPROM_SPI; |
2000 | wm_nvm_set_addrbits_size_eecd(sc); |
2001 | sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM; |
2002 | sc->phy.acquire = wm_get_swsm_semaphore; |
2003 | sc->phy.release = wm_put_swsm_semaphore; |
2004 | break; |
2005 | case WM_T_82573: |
2006 | case WM_T_82574: |
2007 | case WM_T_82583: |
2008 | if (sc->sc_type == WM_T_82573) { |
2009 | sc->sc_flags |= WM_F_LOCK_SWSM; |
2010 | sc->phy.acquire = wm_get_swsm_semaphore; |
2011 | sc->phy.release = wm_put_swsm_semaphore; |
2012 | } else { |
2013 | sc->sc_flags |= WM_F_LOCK_EXTCNF; |
2014 | /* Both PHY and NVM use the same semaphore. */ |
			sc->phy.acquire = wm_get_swfwhw_semaphore;
			sc->phy.release = wm_put_swfwhw_semaphore;
2019 | } |
2020 | if (wm_nvm_is_onboard_eeprom(sc) == 0) { |
2021 | sc->sc_flags |= WM_F_EEPROM_FLASH; |
2022 | sc->sc_nvm_wordsize = 2048; |
2023 | } else { |
2024 | /* SPI */ |
2025 | sc->sc_flags |= WM_F_EEPROM_SPI; |
2026 | wm_nvm_set_addrbits_size_eecd(sc); |
2027 | } |
2028 | sc->sc_flags |= WM_F_EEPROM_EERDEEWR; |
2029 | break; |
2030 | case WM_T_82575: |
2031 | case WM_T_82576: |
2032 | case WM_T_82580: |
2033 | case WM_T_I350: |
2034 | case WM_T_I354: |
2035 | case WM_T_80003: |
2036 | /* SPI */ |
2037 | sc->sc_flags |= WM_F_EEPROM_SPI; |
2038 | wm_nvm_set_addrbits_size_eecd(sc); |
2039 | sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW |
2040 | | WM_F_LOCK_SWSM; |
2041 | sc->phy.acquire = wm_get_phy_82575; |
2042 | sc->phy.release = wm_put_phy_82575; |
2043 | break; |
2044 | case WM_T_ICH8: |
2045 | case WM_T_ICH9: |
2046 | case WM_T_ICH10: |
2047 | case WM_T_PCH: |
2048 | case WM_T_PCH2: |
2049 | case WM_T_PCH_LPT: |
2050 | /* FLASH */ |
2051 | sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF; |
2052 | sc->sc_nvm_wordsize = 2048; |
2053 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH); |
2054 | if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, |
2055 | &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) { |
2056 | aprint_error_dev(sc->sc_dev, |
2057 | "can't map FLASH registers\n" ); |
2058 | goto out; |
2059 | } |
2060 | reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); |
2061 | sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) * |
2062 | ICH_FLASH_SECTOR_SIZE; |
2063 | sc->sc_ich8_flash_bank_size = |
2064 | ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1; |
2065 | sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK); |
2066 | sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; |
2067 | sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); |
2068 | sc->sc_flashreg_offset = 0; |
2069 | sc->phy.acquire = wm_get_swflag_ich8lan; |
2070 | sc->phy.release = wm_put_swflag_ich8lan; |
2071 | break; |
2072 | case WM_T_PCH_SPT: |
2073 | /* SPT has no GFPREG; flash registers mapped through BAR0 */ |
2074 | sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF; |
2075 | sc->sc_flasht = sc->sc_st; |
2076 | sc->sc_flashh = sc->sc_sh; |
2077 | sc->sc_ich8_flash_base = 0; |
2078 | sc->sc_nvm_wordsize = |
2079 | (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1) |
2080 | * NVM_SIZE_MULTIPLIER; |
		/* The size is in bytes; we want words */
2082 | sc->sc_nvm_wordsize /= 2; |
2083 | /* assume 2 banks */ |
2084 | sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2; |
2085 | sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET; |
2086 | sc->phy.acquire = wm_get_swflag_ich8lan; |
2087 | sc->phy.release = wm_put_swflag_ich8lan; |
2088 | break; |
2089 | case WM_T_I210: |
2090 | case WM_T_I211: |
2091 | if (wm_nvm_get_flash_presence_i210(sc)) { |
2092 | wm_nvm_set_addrbits_size_eecd(sc); |
2093 | sc->sc_flags |= WM_F_EEPROM_FLASH_HW; |
2094 | sc->sc_flags |= WM_F_EEPROM_EERDEEWR; |
2095 | } else { |
2096 | sc->sc_nvm_wordsize = INVM_SIZE; |
2097 | sc->sc_flags |= WM_F_EEPROM_INVM; |
2098 | } |
2099 | sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM; |
2100 | sc->phy.acquire = wm_get_phy_82575; |
2101 | sc->phy.release = wm_put_phy_82575; |
2102 | break; |
2103 | default: |
2104 | break; |
2105 | } |
2106 | |
2107 | /* Reset the chip to a known state. */ |
2108 | wm_reset(sc); |
2109 | |
2110 | /* Ensure the SMBI bit is clear before first NVM or PHY access */ |
2111 | switch (sc->sc_type) { |
2112 | case WM_T_82571: |
2113 | case WM_T_82572: |
2114 | reg = CSR_READ(sc, WMREG_SWSM2); |
2115 | if ((reg & SWSM2_LOCK) == 0) { |
2116 | CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK); |
2117 | force_clear_smbi = true; |
2118 | } else |
2119 | force_clear_smbi = false; |
2120 | break; |
2121 | case WM_T_82573: |
2122 | case WM_T_82574: |
2123 | case WM_T_82583: |
2124 | force_clear_smbi = true; |
2125 | break; |
2126 | default: |
2127 | force_clear_smbi = false; |
2128 | break; |
2129 | } |
2130 | if (force_clear_smbi) { |
2131 | reg = CSR_READ(sc, WMREG_SWSM); |
2132 | if ((reg & SWSM_SMBI) != 0) |
2133 | aprint_error_dev(sc->sc_dev, |
2134 | "Please update the Bootagent\n" ); |
2135 | CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI); |
2136 | } |
2137 | |
2138 | /* |
2139 | * Defer printing the EEPROM type until after verifying the checksum |
2140 | * This allows the EEPROM type to be printed correctly in the case |
2141 | * that no EEPROM is attached. |
2142 | */ |
2143 | /* |
2144 | * Validate the EEPROM checksum. If the checksum fails, flag |
2145 | * this for later, so we can fail future reads from the EEPROM. |
2146 | */ |
2147 | if (wm_nvm_validate_checksum(sc)) { |
2148 | /* |
2149 | * Read twice again because some PCI-e parts fail the |
2150 | * first check due to the link being in sleep state. |
2151 | */ |
2152 | if (wm_nvm_validate_checksum(sc)) |
2153 | sc->sc_flags |= WM_F_EEPROM_INVALID; |
2154 | } |
2155 | |
2156 | /* Set device properties (macflags) */ |
2157 | prop_dictionary_set_uint32(dict, "macflags" , sc->sc_flags); |
2158 | |
2159 | if (sc->sc_flags & WM_F_EEPROM_INVALID) |
2160 | aprint_verbose_dev(sc->sc_dev, "No EEPROM" ); |
2161 | else { |
2162 | aprint_verbose_dev(sc->sc_dev, "%u words " , |
2163 | sc->sc_nvm_wordsize); |
2164 | if (sc->sc_flags & WM_F_EEPROM_INVM) |
2165 | aprint_verbose("iNVM" ); |
2166 | else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) |
2167 | aprint_verbose("FLASH(HW)" ); |
2168 | else if (sc->sc_flags & WM_F_EEPROM_FLASH) |
2169 | aprint_verbose("FLASH" ); |
2170 | else { |
2171 | if (sc->sc_flags & WM_F_EEPROM_SPI) |
2172 | eetype = "SPI" ; |
2173 | else |
2174 | eetype = "MicroWire" ; |
2175 | aprint_verbose("(%d address bits) %s EEPROM" , |
2176 | sc->sc_nvm_addrbits, eetype); |
2177 | } |
2178 | } |
2179 | wm_nvm_version(sc); |
2180 | aprint_verbose("\n" ); |
2181 | |
2182 | /* Check for I21[01] PLL workaround */ |
2183 | if (sc->sc_type == WM_T_I210) |
2184 | sc->sc_flags |= WM_F_PLL_WA_I210; |
2185 | if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) { |
2186 | /* NVM image release 3.25 has a workaround */ |
2187 | if ((sc->sc_nvm_ver_major < 3) |
2188 | || ((sc->sc_nvm_ver_major == 3) |
2189 | && (sc->sc_nvm_ver_minor < 25))) { |
2190 | aprint_verbose_dev(sc->sc_dev, |
2191 | "ROM image version %d.%d is older than 3.25\n" , |
2192 | sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor); |
2193 | sc->sc_flags |= WM_F_PLL_WA_I210; |
2194 | } |
2195 | } |
2196 | if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) |
2197 | wm_pll_workaround_i210(sc); |
2198 | |
2199 | wm_get_wakeup(sc); |
2200 | |
2201 | /* Non-AMT based hardware can now take control from firmware */ |
2202 | if ((sc->sc_flags & WM_F_HAS_AMT) == 0) |
2203 | wm_get_hw_control(sc); |
2204 | |
2205 | /* |
2206 | * Read the Ethernet address from the EEPROM, if not first found |
2207 | * in device properties. |
2208 | */ |
2209 | ea = prop_dictionary_get(dict, "mac-address" ); |
2210 | if (ea != NULL) { |
2211 | KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); |
2212 | KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); |
2213 | memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); |
2214 | } else { |
2215 | if (wm_read_mac_addr(sc, enaddr) != 0) { |
2216 | aprint_error_dev(sc->sc_dev, |
2217 | "unable to read Ethernet address\n" ); |
2218 | goto out; |
2219 | } |
2220 | } |
2221 | |
2222 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n" , |
2223 | ether_sprintf(enaddr)); |
2224 | |
2225 | /* |
2226 | * Read the config info from the EEPROM, and set up various |
2227 | * bits in the control registers based on their contents. |
2228 | */ |
2229 | pn = prop_dictionary_get(dict, "i82543-cfg1" ); |
2230 | if (pn != NULL) { |
2231 | KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); |
2232 | cfg1 = (uint16_t) prop_number_integer_value(pn); |
2233 | } else { |
2234 | if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) { |
2235 | aprint_error_dev(sc->sc_dev, "unable to read CFG1\n" ); |
2236 | goto out; |
2237 | } |
2238 | } |
2239 | |
2240 | pn = prop_dictionary_get(dict, "i82543-cfg2" ); |
2241 | if (pn != NULL) { |
2242 | KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); |
2243 | cfg2 = (uint16_t) prop_number_integer_value(pn); |
2244 | } else { |
2245 | if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) { |
2246 | aprint_error_dev(sc->sc_dev, "unable to read CFG2\n" ); |
2247 | goto out; |
2248 | } |
2249 | } |
2250 | |
2251 | /* check for WM_F_WOL */ |
2252 | switch (sc->sc_type) { |
2253 | case WM_T_82542_2_0: |
2254 | case WM_T_82542_2_1: |
2255 | case WM_T_82543: |
2256 | /* dummy? */ |
2257 | eeprom_data = 0; |
2258 | apme_mask = NVM_CFG3_APME; |
2259 | break; |
2260 | case WM_T_82544: |
2261 | apme_mask = NVM_CFG2_82544_APM_EN; |
2262 | eeprom_data = cfg2; |
2263 | break; |
2264 | case WM_T_82546: |
2265 | case WM_T_82546_3: |
2266 | case WM_T_82571: |
2267 | case WM_T_82572: |
2268 | case WM_T_82573: |
2269 | case WM_T_82574: |
2270 | case WM_T_82583: |
2271 | case WM_T_80003: |
2272 | default: |
2273 | apme_mask = NVM_CFG3_APME; |
2274 | wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB |
2275 | : NVM_OFF_CFG3_PORTA, 1, &eeprom_data); |
2276 | break; |
2277 | case WM_T_82575: |
2278 | case WM_T_82576: |
2279 | case WM_T_82580: |
2280 | case WM_T_I350: |
2281 | case WM_T_I354: /* XXX ok? */ |
2282 | case WM_T_ICH8: |
2283 | case WM_T_ICH9: |
2284 | case WM_T_ICH10: |
2285 | case WM_T_PCH: |
2286 | case WM_T_PCH2: |
2287 | case WM_T_PCH_LPT: |
2288 | case WM_T_PCH_SPT: |
2289 | /* XXX The funcid should be checked on some devices */ |
2290 | apme_mask = WUC_APME; |
2291 | eeprom_data = CSR_READ(sc, WMREG_WUC); |
2292 | break; |
2293 | } |
2294 | |
	/* Set the WM_F_WOL flag now that eeprom_data and apme_mask are known */
2296 | if ((eeprom_data & apme_mask) != 0) |
2297 | sc->sc_flags |= WM_F_WOL; |
2298 | #ifdef WM_DEBUG |
2299 | if ((sc->sc_flags & WM_F_WOL) != 0) |
2300 | printf("WOL\n" ); |
2301 | #endif |
2302 | |
2303 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) { |
2304 | /* Check NVM for autonegotiation */ |
2305 | if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) { |
2306 | if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0) |
2307 | sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO; |
2308 | } |
2309 | } |
2310 | |
2311 | /* |
2312 | * XXX need special handling for some multiple port cards |
2313 | * to disable a paticular port. |
2314 | */ |
2315 | |
2316 | if (sc->sc_type >= WM_T_82544) { |
2317 | pn = prop_dictionary_get(dict, "i82543-swdpin" ); |
2318 | if (pn != NULL) { |
2319 | KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); |
2320 | swdpin = (uint16_t) prop_number_integer_value(pn); |
2321 | } else { |
2322 | if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) { |
2323 | aprint_error_dev(sc->sc_dev, |
2324 | "unable to read SWDPIN\n" ); |
2325 | goto out; |
2326 | } |
2327 | } |
2328 | } |
2329 | |
2330 | if (cfg1 & NVM_CFG1_ILOS) |
2331 | sc->sc_ctrl |= CTRL_ILOS; |
2332 | |
2333 | /* |
2334 | * XXX |
2335 | * This code isn't correct because pin 2 and 3 are located |
2336 | * in different position on newer chips. Check all datasheet. |
2337 | * |
2338 | * Until resolve this problem, check if a chip < 82580 |
2339 | */ |
2340 | if (sc->sc_type <= WM_T_82580) { |
2341 | if (sc->sc_type >= WM_T_82544) { |
2342 | sc->sc_ctrl |= |
2343 | ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) << |
2344 | CTRL_SWDPIO_SHIFT; |
2345 | sc->sc_ctrl |= |
2346 | ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) << |
2347 | CTRL_SWDPINS_SHIFT; |
2348 | } else { |
2349 | sc->sc_ctrl |= |
2350 | ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) << |
2351 | CTRL_SWDPIO_SHIFT; |
2352 | } |
2353 | } |
2354 | |
	/* XXX Should this also apply to chips other than the 82580? */
2356 | if (sc->sc_type == WM_T_82580) { |
2357 | wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword); |
2358 | if (nvmword & __BIT(13)) |
2359 | sc->sc_ctrl |= CTRL_ILOS; |
2360 | } |
2361 | |
2362 | #if 0 |
2363 | if (sc->sc_type >= WM_T_82544) { |
2364 | if (cfg1 & NVM_CFG1_IPS0) |
2365 | sc->sc_ctrl_ext |= CTRL_EXT_IPS; |
2366 | if (cfg1 & NVM_CFG1_IPS1) |
2367 | sc->sc_ctrl_ext |= CTRL_EXT_IPS1; |
2368 | sc->sc_ctrl_ext |= |
2369 | ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << |
2370 | CTRL_EXT_SWDPIO_SHIFT; |
2371 | sc->sc_ctrl_ext |= |
2372 | ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << |
2373 | CTRL_EXT_SWDPINS_SHIFT; |
2374 | } else { |
2375 | sc->sc_ctrl_ext |= |
2376 | ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) << |
2377 | CTRL_EXT_SWDPIO_SHIFT; |
2378 | } |
2379 | #endif |
2380 | |
2381 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
2382 | #if 0 |
2383 | CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); |
2384 | #endif |
2385 | |
2386 | if (sc->sc_type == WM_T_PCH) { |
2387 | uint16_t val; |
2388 | |
2389 | /* Save the NVM K1 bit setting */ |
2390 | wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val); |
2391 | |
2392 | if ((val & NVM_K1_CONFIG_ENABLE) != 0) |
2393 | sc->sc_nvm_k1_enabled = 1; |
2394 | else |
2395 | sc->sc_nvm_k1_enabled = 0; |
2396 | } |
2397 | |
2398 | /* |
2399 | * Determine if we're TBI,GMII or SGMII mode, and initialize the |
2400 | * media structures accordingly. |
2401 | */ |
2402 | if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 |
2403 | || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH |
2404 | || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT |
2405 | || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573 |
2406 | || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { |
2407 | /* STATUS_TBIMODE reserved/reused, can't rely on it */ |
2408 | wm_gmii_mediainit(sc, wmp->wmp_product); |
2409 | } else if (sc->sc_type < WM_T_82543 || |
2410 | (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { |
2411 | if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { |
2412 | aprint_error_dev(sc->sc_dev, |
2413 | "WARNING: TBIMODE set on 1000BASE-T product!\n" ); |
2414 | sc->sc_mediatype = WM_MEDIATYPE_FIBER; |
2415 | } |
2416 | wm_tbi_mediainit(sc); |
2417 | } else { |
2418 | switch (sc->sc_type) { |
2419 | case WM_T_82575: |
2420 | case WM_T_82576: |
2421 | case WM_T_82580: |
2422 | case WM_T_I350: |
2423 | case WM_T_I354: |
2424 | case WM_T_I210: |
2425 | case WM_T_I211: |
2426 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
2427 | link_mode = reg & CTRL_EXT_LINK_MODE_MASK; |
2428 | switch (link_mode) { |
2429 | case CTRL_EXT_LINK_MODE_1000KX: |
2430 | aprint_verbose_dev(sc->sc_dev, "1000KX\n" ); |
2431 | sc->sc_mediatype = WM_MEDIATYPE_SERDES; |
2432 | break; |
2433 | case CTRL_EXT_LINK_MODE_SGMII: |
2434 | if (wm_sgmii_uses_mdio(sc)) { |
2435 | aprint_verbose_dev(sc->sc_dev, |
2436 | "SGMII(MDIO)\n" ); |
2437 | sc->sc_flags |= WM_F_SGMII; |
2438 | sc->sc_mediatype = WM_MEDIATYPE_COPPER; |
2439 | break; |
2440 | } |
2441 | aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n" ); |
2442 | /*FALLTHROUGH*/ |
2443 | case CTRL_EXT_LINK_MODE_PCIE_SERDES: |
2444 | sc->sc_mediatype = wm_sfp_get_media_type(sc); |
2445 | if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) { |
2446 | if (link_mode |
2447 | == CTRL_EXT_LINK_MODE_SGMII) { |
2448 | sc->sc_mediatype |
2449 | = WM_MEDIATYPE_COPPER; |
2450 | sc->sc_flags |= WM_F_SGMII; |
2451 | } else { |
2452 | sc->sc_mediatype |
2453 | = WM_MEDIATYPE_SERDES; |
2454 | aprint_verbose_dev(sc->sc_dev, |
2455 | "SERDES\n" ); |
2456 | } |
2457 | break; |
2458 | } |
2459 | if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) |
2460 | aprint_verbose_dev(sc->sc_dev, |
2461 | "SERDES\n" ); |
2462 | |
2463 | /* Change current link mode setting */ |
2464 | reg &= ~CTRL_EXT_LINK_MODE_MASK; |
2465 | switch (sc->sc_mediatype) { |
2466 | case WM_MEDIATYPE_COPPER: |
2467 | reg |= CTRL_EXT_LINK_MODE_SGMII; |
2468 | break; |
2469 | case WM_MEDIATYPE_SERDES: |
2470 | reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES; |
2471 | break; |
2472 | default: |
2473 | break; |
2474 | } |
2475 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
2476 | break; |
2477 | case CTRL_EXT_LINK_MODE_GMII: |
2478 | default: |
2479 | aprint_verbose_dev(sc->sc_dev, "Copper\n" ); |
2480 | sc->sc_mediatype = WM_MEDIATYPE_COPPER; |
2481 | break; |
2482 | } |
2483 | |
			if ((sc->sc_flags & WM_F_SGMII) != 0)
				reg |= CTRL_EXT_I2C_ENA;
			else
				reg &= ~CTRL_EXT_I2C_ENA;
2489 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
2490 | |
2491 | if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) |
2492 | wm_gmii_mediainit(sc, wmp->wmp_product); |
2493 | else |
2494 | wm_tbi_mediainit(sc); |
2495 | break; |
2496 | default: |
2497 | if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) |
2498 | aprint_error_dev(sc->sc_dev, |
2499 | "WARNING: TBIMODE clear on 1000BASE-X product!\n" ); |
2500 | sc->sc_mediatype = WM_MEDIATYPE_COPPER; |
2501 | wm_gmii_mediainit(sc, wmp->wmp_product); |
2502 | } |
2503 | } |
2504 | |
2505 | ifp = &sc->sc_ethercom.ec_if; |
2506 | xname = device_xname(sc->sc_dev); |
2507 | strlcpy(ifp->if_xname, xname, IFNAMSIZ); |
2508 | ifp->if_softc = sc; |
2509 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
2510 | ifp->if_extflags = IFEF_START_MPSAFE; |
2511 | ifp->if_ioctl = wm_ioctl; |
2512 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
2513 | ifp->if_start = wm_nq_start; |
2514 | if (sc->sc_nqueues > 1) |
2515 | ifp->if_transmit = wm_nq_transmit; |
2516 | } else |
2517 | ifp->if_start = wm_start; |
2518 | ifp->if_watchdog = wm_watchdog; |
2519 | ifp->if_init = wm_init; |
2520 | ifp->if_stop = wm_stop; |
2521 | IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); |
2522 | IFQ_SET_READY(&ifp->if_snd); |
2523 | |
2524 | /* Check for jumbo frame */ |
2525 | switch (sc->sc_type) { |
2526 | case WM_T_82573: |
2527 | /* XXX limited to 9234 if ASPM is disabled */ |
2528 | wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword); |
2529 | if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0) |
2530 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; |
2531 | break; |
2532 | case WM_T_82571: |
2533 | case WM_T_82572: |
2534 | case WM_T_82574: |
2535 | case WM_T_82575: |
2536 | case WM_T_82576: |
2537 | case WM_T_82580: |
2538 | case WM_T_I350: |
2539 | case WM_T_I354: /* XXXX ok? */ |
2540 | case WM_T_I210: |
2541 | case WM_T_I211: |
2542 | case WM_T_80003: |
2543 | case WM_T_ICH9: |
2544 | case WM_T_ICH10: |
2545 | case WM_T_PCH2: /* PCH2 supports 9K frame size */ |
2546 | case WM_T_PCH_LPT: |
2547 | case WM_T_PCH_SPT: |
2548 | /* XXX limited to 9234 */ |
2549 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; |
2550 | break; |
2551 | case WM_T_PCH: |
2552 | /* XXX limited to 4096 */ |
2553 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; |
2554 | break; |
2555 | case WM_T_82542_2_0: |
2556 | case WM_T_82542_2_1: |
2557 | case WM_T_82583: |
2558 | case WM_T_ICH8: |
		/* No support for jumbo frames */
2560 | break; |
2561 | default: |
2562 | /* ETHER_MAX_LEN_JUMBO */ |
2563 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; |
2564 | break; |
2565 | } |
2566 | |
	/* If we're an i82543 or greater, we can support VLANs. */
2568 | if (sc->sc_type >= WM_T_82543) |
2569 | sc->sc_ethercom.ec_capabilities |= |
2570 | ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; |
2571 | |
2572 | /* |
2573 | * We can perform TCPv4 and UDPv4 checkums in-bound. Only |
2574 | * on i82543 and later. |
2575 | */ |
2576 | if (sc->sc_type >= WM_T_82543) { |
2577 | ifp->if_capabilities |= |
2578 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
2579 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
2580 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | |
2581 | IFCAP_CSUM_TCPv6_Tx | |
2582 | IFCAP_CSUM_UDPv6_Tx; |
2583 | } |
2584 | |
2585 | /* |
2586 | * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL. |
2587 | * |
2588 | * 82541GI (8086:1076) ... no |
2589 | * 82572EI (8086:10b9) ... yes |
2590 | */ |
2591 | if (sc->sc_type >= WM_T_82571) { |
2592 | ifp->if_capabilities |= |
2593 | IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; |
2594 | } |
2595 | |
2596 | /* |
2597 | * If we're a i82544 or greater (except i82547), we can do |
2598 | * TCP segmentation offload. |
2599 | */ |
2600 | if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) { |
2601 | ifp->if_capabilities |= IFCAP_TSOv4; |
2602 | } |
2603 | |
2604 | if (sc->sc_type >= WM_T_82571) { |
2605 | ifp->if_capabilities |= IFCAP_TSOv6; |
2606 | } |
2607 | |
2608 | #ifdef WM_MPSAFE |
2609 | sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
2610 | #else |
2611 | sc->sc_core_lock = NULL; |
2612 | #endif |
2613 | |
2614 | /* Attach the interface. */ |
2615 | if_initialize(ifp); |
2616 | sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if); |
2617 | ether_ifattach(ifp, enaddr); |
2618 | if_register(ifp); |
2619 | ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb); |
2620 | rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, |
2621 | RND_FLAG_DEFAULT); |
2622 | |
2623 | #ifdef WM_EVENT_COUNTERS |
2624 | /* Attach event counters. */ |
2625 | evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, |
2626 | NULL, xname, "linkintr" ); |
2627 | |
2628 | evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, |
2629 | NULL, xname, "tx_xoff" ); |
2630 | evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, |
2631 | NULL, xname, "tx_xon" ); |
2632 | evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, |
2633 | NULL, xname, "rx_xoff" ); |
2634 | evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, |
2635 | NULL, xname, "rx_xon" ); |
2636 | evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, |
2637 | NULL, xname, "rx_macctl" ); |
2638 | #endif /* WM_EVENT_COUNTERS */ |
2639 | |
2640 | if (pmf_device_register(self, wm_suspend, wm_resume)) |
2641 | pmf_class_network_register(self, ifp); |
2642 | else |
2643 | aprint_error_dev(self, "couldn't establish power handler\n" ); |
2644 | |
2645 | sc->sc_flags |= WM_F_ATTACHED; |
2646 | out: |
2647 | return; |
2648 | } |
2649 | |
2650 | /* The detach function (ca_detach) */ |
2651 | static int |
2652 | wm_detach(device_t self, int flags __unused) |
2653 | { |
2654 | struct wm_softc *sc = device_private(self); |
2655 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
2656 | int i; |
2657 | |
2658 | if ((sc->sc_flags & WM_F_ATTACHED) == 0) |
2659 | return 0; |
2660 | |
	/* Stop the interface; callouts are stopped inside wm_stop(). */
2662 | wm_stop(ifp, 1); |
2663 | |
2664 | pmf_device_deregister(self); |
2665 | |
2666 | /* Tell the firmware about the release */ |
2667 | WM_CORE_LOCK(sc); |
2668 | wm_release_manageability(sc); |
2669 | wm_release_hw_control(sc); |
2670 | wm_enable_wakeup(sc); |
2671 | WM_CORE_UNLOCK(sc); |
2672 | |
2673 | mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); |
2674 | |
2675 | /* Delete all remaining media. */ |
2676 | ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); |
2677 | |
2678 | ether_ifdetach(ifp); |
2679 | if_detach(ifp); |
2680 | if_percpuq_destroy(sc->sc_ipq); |
2681 | |
2682 | /* Unload RX dmamaps and free mbufs */ |
2683 | for (i = 0; i < sc->sc_nqueues; i++) { |
2684 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
2685 | mutex_enter(rxq->rxq_lock); |
2686 | wm_rxdrain(rxq); |
2687 | mutex_exit(rxq->rxq_lock); |
2688 | } |
2689 | /* Must unlock here */ |
2690 | |
2691 | /* Disestablish the interrupt handler */ |
2692 | for (i = 0; i < sc->sc_nintrs; i++) { |
2693 | if (sc->sc_ihs[i] != NULL) { |
2694 | pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]); |
2695 | sc->sc_ihs[i] = NULL; |
2696 | } |
2697 | } |
2698 | pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs); |
2699 | |
2700 | wm_free_txrx_queues(sc); |
2701 | |
2702 | /* Unmap the registers */ |
2703 | if (sc->sc_ss) { |
2704 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); |
2705 | sc->sc_ss = 0; |
2706 | } |
2707 | if (sc->sc_ios) { |
2708 | bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); |
2709 | sc->sc_ios = 0; |
2710 | } |
2711 | if (sc->sc_flashs) { |
2712 | bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs); |
2713 | sc->sc_flashs = 0; |
2714 | } |
2715 | |
2716 | if (sc->sc_core_lock) |
2717 | mutex_obj_free(sc->sc_core_lock); |
2718 | if (sc->sc_ich_phymtx) |
2719 | mutex_obj_free(sc->sc_ich_phymtx); |
2720 | if (sc->sc_ich_nvmmtx) |
2721 | mutex_obj_free(sc->sc_ich_nvmmtx); |
2722 | |
2723 | return 0; |
2724 | } |
2725 | |
2726 | static bool |
2727 | wm_suspend(device_t self, const pmf_qual_t *qual) |
2728 | { |
2729 | struct wm_softc *sc = device_private(self); |
2730 | |
2731 | wm_release_manageability(sc); |
2732 | wm_release_hw_control(sc); |
2733 | wm_enable_wakeup(sc); |
2734 | |
2735 | return true; |
2736 | } |
2737 | |
2738 | static bool |
2739 | wm_resume(device_t self, const pmf_qual_t *qual) |
2740 | { |
2741 | struct wm_softc *sc = device_private(self); |
2742 | |
2743 | wm_init_manageability(sc); |
2744 | |
2745 | return true; |
2746 | } |
2747 | |
2748 | /* |
2749 | * wm_watchdog: [ifnet interface function] |
2750 | * |
2751 | * Watchdog timer handler. |
2752 | */ |
2753 | static void |
2754 | wm_watchdog(struct ifnet *ifp) |
2755 | { |
2756 | int qid; |
2757 | struct wm_softc *sc = ifp->if_softc; |
2758 | |
2759 | for (qid = 0; qid < sc->sc_nqueues; qid++) { |
2760 | struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq; |
2761 | |
2762 | wm_watchdog_txq(ifp, txq); |
2763 | } |
2764 | |
2765 | /* Reset the interface. */ |
2766 | (void) wm_init(ifp); |
2767 | |
2768 | /* |
2769 | * There are still some upper layer processing which call |
2770 | * ifp->if_start(). e.g. ALTQ |
2771 | */ |
2772 | /* Try to get more packets going. */ |
2773 | ifp->if_start(ifp); |
2774 | } |
2775 | |
2776 | static void |
2777 | wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq) |
2778 | { |
2779 | struct wm_softc *sc = ifp->if_softc; |
2780 | |
2781 | /* |
2782 | * Since we're using delayed interrupts, sweep up |
2783 | * before we report an error. |
2784 | */ |
2785 | mutex_enter(txq->txq_lock); |
2786 | wm_txeof(sc, txq); |
2787 | mutex_exit(txq->txq_lock); |
2788 | |
2789 | if (txq->txq_free != WM_NTXDESC(txq)) { |
2790 | #ifdef WM_DEBUG |
2791 | int i, j; |
2792 | struct wm_txsoft *txs; |
2793 | #endif |
2794 | log(LOG_ERR, |
2795 | "%s: device timeout (txfree %d txsfree %d txnext %d)\n" , |
2796 | device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree, |
2797 | txq->txq_next); |
2798 | ifp->if_oerrors++; |
2799 | #ifdef WM_DEBUG |
2800 | for (i = txq->txq_sdirty; i != txq->txq_snext ; |
2801 | i = WM_NEXTTXS(txq, i)) { |
2802 | txs = &txq->txq_soft[i]; |
2803 | printf("txs %d tx %d -> %d\n" , |
2804 | i, txs->txs_firstdesc, txs->txs_lastdesc); |
2805 | for (j = txs->txs_firstdesc; ; |
2806 | j = WM_NEXTTX(txq, j)) { |
2807 | printf("\tdesc %d: 0x%" PRIx64 "\n" , j, |
2808 | txq->txq_nq_descs[j].nqtx_data.nqtxd_addr); |
2809 | printf("\t %#08x%08x\n" , |
2810 | txq->txq_nq_descs[j].nqtx_data.nqtxd_fields, |
2811 | txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen); |
2812 | if (j == txs->txs_lastdesc) |
2813 | break; |
2814 | } |
2815 | } |
2816 | #endif |
2817 | } |
2818 | } |
2819 | |
2820 | /* |
2821 | * wm_tick: |
2822 | * |
2823 | * One second timer, used to check link status, sweep up |
2824 | * completed transmit jobs, etc. |
2825 | */ |
2826 | static void |
2827 | wm_tick(void *arg) |
2828 | { |
2829 | struct wm_softc *sc = arg; |
2830 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
2831 | #ifndef WM_MPSAFE |
2832 | int s = splnet(); |
2833 | #endif |
2834 | |
2835 | WM_CORE_LOCK(sc); |
2836 | |
2837 | if (sc->sc_core_stopping) |
2838 | goto out; |
2839 | |
2840 | if (sc->sc_type >= WM_T_82542_2_1) { |
2841 | WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); |
2842 | WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); |
2843 | WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); |
2844 | WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); |
2845 | WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); |
2846 | } |
2847 | |
2848 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); |
2849 | ifp->if_ierrors += 0ULL + /* ensure quad_t */ |
2850 | + CSR_READ(sc, WMREG_CRCERRS) |
2851 | + CSR_READ(sc, WMREG_ALGNERRC) |
2852 | + CSR_READ(sc, WMREG_SYMERRC) |
2853 | + CSR_READ(sc, WMREG_RXERRC) |
2854 | + CSR_READ(sc, WMREG_SEC) |
2855 | + CSR_READ(sc, WMREG_CEXTERR) |
2856 | + CSR_READ(sc, WMREG_RLEC); |
2857 | /* |
2858 | * WMREG_RNBC is incremented when there is no available buffers in host |
2859 | * memory. It does not mean the number of dropped packet. Because |
2860 | * ethernet controller can receive packets in such case if there is |
2861 | * space in phy's FIFO. |
2862 | * |
2863 | * If you want to know the nubmer of WMREG_RMBC, you should use such as |
2864 | * own EVCNT instead of if_iqdrops. |
2865 | */ |
2866 | ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC); |
2867 | |
2868 | if (sc->sc_flags & WM_F_HAS_MII) |
2869 | mii_tick(&sc->sc_mii); |
2870 | else if ((sc->sc_type >= WM_T_82575) |
2871 | && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) |
2872 | wm_serdes_tick(sc); |
2873 | else |
2874 | wm_tbi_tick(sc); |
2875 | |
2876 | out: |
2877 | WM_CORE_UNLOCK(sc); |
2878 | #ifndef WM_MPSAFE |
2879 | splx(s); |
2880 | #endif |
2881 | |
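	/* Re-arm the one-second tick unless the interface is stopping. */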
2882 | if (!sc->sc_core_stopping) |
2883 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); |
2884 | } |
2885 | |
2886 | static int |
2887 | wm_ifflags_cb(struct ethercom *ec) |
2888 | { |
2889 | struct ifnet *ifp = &ec->ec_if; |
2890 | struct wm_softc *sc = ifp->if_softc; |
2891 | int rc = 0; |
2892 | |
2893 | WM_CORE_LOCK(sc); |
2894 | |
2895 | int change = ifp->if_flags ^ sc->sc_if_flags; |
2896 | sc->sc_if_flags = ifp->if_flags; |
2897 | |
2898 | if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { |
2899 | rc = ENETRESET; |
2900 | goto out; |
2901 | } |
2902 | |
2903 | if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) |
2904 | wm_set_filter(sc); |
2905 | |
2906 | wm_set_vlan(sc); |
2907 | |
2908 | out: |
2909 | WM_CORE_UNLOCK(sc); |
2910 | |
2911 | return rc; |
2912 | } |
2913 | |
2914 | /* |
2915 | * wm_ioctl: [ifnet interface function] |
2916 | * |
2917 | * Handle control requests from the operator. |
2918 | */ |
2919 | static int |
2920 | wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
2921 | { |
2922 | struct wm_softc *sc = ifp->if_softc; |
2923 | struct ifreq *ifr = (struct ifreq *) data; |
2924 | struct ifaddr *ifa = (struct ifaddr *)data; |
2925 | struct sockaddr_dl *sdl; |
2926 | int s, error; |
2927 | |
2928 | DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n" , |
2929 | device_xname(sc->sc_dev), __func__)); |
2930 | |
2931 | #ifndef WM_MPSAFE |
2932 | s = splnet(); |
2933 | #endif |
2934 | switch (cmd) { |
2935 | case SIOCSIFMEDIA: |
2936 | case SIOCGIFMEDIA: |
2937 | WM_CORE_LOCK(sc); |
2938 | /* Flow control requires full-duplex mode. */ |
2939 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || |
2940 | (ifr->ifr_media & IFM_FDX) == 0) |
2941 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
2942 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { |
2943 | if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { |
2944 | /* We can do both TXPAUSE and RXPAUSE. */ |
2945 | ifr->ifr_media |= |
2946 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
2947 | } |
2948 | sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; |
2949 | } |
2950 | WM_CORE_UNLOCK(sc); |
2951 | #ifdef WM_MPSAFE |
2952 | s = splnet(); |
2953 | #endif |
2954 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); |
2955 | #ifdef WM_MPSAFE |
2956 | splx(s); |
2957 | #endif |
2958 | break; |
2959 | case SIOCINITIFADDR: |
2960 | WM_CORE_LOCK(sc); |
2961 | if (ifa->ifa_addr->sa_family == AF_LINK) { |
2962 | sdl = satosdl(ifp->if_dl->ifa_addr); |
2963 | (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, |
2964 | LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); |
2965 | /* unicast address is first multicast entry */ |
2966 | wm_set_filter(sc); |
2967 | error = 0; |
2968 | WM_CORE_UNLOCK(sc); |
2969 | break; |
2970 | } |
2971 | WM_CORE_UNLOCK(sc); |
2972 | /*FALLTHROUGH*/ |
2973 | default: |
2974 | #ifdef WM_MPSAFE |
2975 | s = splnet(); |
2976 | #endif |
2977 | /* It may call wm_start, so unlock here */ |
2978 | error = ether_ioctl(ifp, cmd, data); |
2979 | #ifdef WM_MPSAFE |
2980 | splx(s); |
2981 | #endif |
2982 | if (error != ENETRESET) |
2983 | break; |
2984 | |
2985 | error = 0; |
2986 | |
2987 | if (cmd == SIOCSIFCAP) { |
2988 | error = (*ifp->if_init)(ifp); |
2989 | } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
2990 | ; |
2991 | else if (ifp->if_flags & IFF_RUNNING) { |
2992 | /* |
2993 | * Multicast list has changed; set the hardware filter |
2994 | * accordingly. |
2995 | */ |
2996 | WM_CORE_LOCK(sc); |
2997 | wm_set_filter(sc); |
2998 | WM_CORE_UNLOCK(sc); |
2999 | } |
3000 | break; |
3001 | } |
3002 | |
3003 | #ifndef WM_MPSAFE |
3004 | splx(s); |
3005 | #endif |
3006 | return error; |
3007 | } |
3008 | |
3009 | /* MAC address related */ |
3010 | |
3011 | /* |
3012 | * Get the offset of MAC address and return it. |
3013 | * If error occured, use offset 0. |
3014 | */ |
3015 | static uint16_t |
3016 | wm_check_alt_mac_addr(struct wm_softc *sc) |
3017 | { |
3018 | uint16_t myea[ETHER_ADDR_LEN / 2]; |
3019 | uint16_t offset = NVM_OFF_MACADDR; |
3020 | |
3021 | /* Try to read alternative MAC address pointer */ |
3022 | if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0) |
3023 | return 0; |
3024 | |
/* Check whether the pointer is valid. */
3026 | if ((offset == 0x0000) || (offset == 0xffff)) |
3027 | return 0; |
3028 | |
3029 | offset += NVM_OFF_MACADDR_82571(sc->sc_funcid); |
3030 | /* |
3031 | * Check whether alternative MAC address is valid or not. |
3032 | * Some cards have non 0xffff pointer but those don't use |
3033 | * alternative MAC address in reality. |
3034 | * |
3035 | * Check whether the broadcast bit is set or not. |
3036 | */ |
3037 | if (wm_nvm_read(sc, offset, 1, myea) == 0) |
3038 | if (((myea[0] & 0xff) & 0x01) == 0) |
3039 | return offset; /* Found */ |
3040 | |
3041 | /* Not found */ |
3042 | return 0; |
3043 | } |
3044 | |
3045 | static int |
3046 | wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr) |
3047 | { |
3048 | uint16_t myea[ETHER_ADDR_LEN / 2]; |
3049 | uint16_t offset = NVM_OFF_MACADDR; |
3050 | int do_invert = 0; |
3051 | |
3052 | switch (sc->sc_type) { |
3053 | case WM_T_82580: |
3054 | case WM_T_I350: |
3055 | case WM_T_I354: |
3056 | /* EEPROM Top Level Partitioning */ |
3057 | offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0; |
3058 | break; |
3059 | case WM_T_82571: |
3060 | case WM_T_82575: |
3061 | case WM_T_82576: |
3062 | case WM_T_80003: |
3063 | case WM_T_I210: |
3064 | case WM_T_I211: |
3065 | offset = wm_check_alt_mac_addr(sc); |
3066 | if (offset == 0) |
3067 | if ((sc->sc_funcid & 0x01) == 1) |
3068 | do_invert = 1; |
3069 | break; |
3070 | default: |
3071 | if ((sc->sc_funcid & 0x01) == 1) |
3072 | do_invert = 1; |
3073 | break; |
3074 | } |
3075 | |
3076 | if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0) |
3077 | goto bad; |
3078 | |
3079 | enaddr[0] = myea[0] & 0xff; |
3080 | enaddr[1] = myea[0] >> 8; |
3081 | enaddr[2] = myea[1] & 0xff; |
3082 | enaddr[3] = myea[1] >> 8; |
3083 | enaddr[4] = myea[2] & 0xff; |
3084 | enaddr[5] = myea[2] >> 8; |
3085 | |
3086 | /* |
3087 | * Toggle the LSB of the MAC address on the second port |
3088 | * of some dual port cards. |
3089 | */ |
3090 | if (do_invert != 0) |
3091 | enaddr[5] ^= 1; |
3092 | |
3093 | return 0; |
3094 | |
3095 | bad: |
3096 | return -1; |
3097 | } |
3098 | |
3099 | /* |
3100 | * wm_set_ral: |
3101 | * |
* Set an entry in the receive address list.
3103 | */ |
3104 | static void |
3105 | wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) |
3106 | { |
3107 | uint32_t ral_lo, ral_hi; |
3108 | |
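/*
* RAL_LO holds the first four octets of the address and RAL_HI the last
* two; RAL_AV in the high word marks the entry as valid. A NULL enaddr
* clears the entry.
*/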
3109 | if (enaddr != NULL) { |
3110 | ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | |
3111 | (enaddr[3] << 24); |
3112 | ral_hi = enaddr[4] | (enaddr[5] << 8); |
3113 | ral_hi |= RAL_AV; |
3114 | } else { |
3115 | ral_lo = 0; |
3116 | ral_hi = 0; |
3117 | } |
3118 | |
3119 | if (sc->sc_type >= WM_T_82544) { |
3120 | CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), |
3121 | ral_lo); |
3122 | CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), |
3123 | ral_hi); |
3124 | } else { |
3125 | CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); |
3126 | CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); |
3127 | } |
3128 | } |
3129 | |
3130 | /* |
3131 | * wm_mchash: |
3132 | * |
3133 | * Compute the hash of the multicast address for the 4096-bit |
3134 | * multicast filter. |
3135 | */ |
3136 | static uint32_t |
3137 | wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) |
3138 | { |
3139 | static const int lo_shift[4] = { 4, 3, 2, 0 }; |
3140 | static const int hi_shift[4] = { 4, 5, 6, 8 }; |
3141 | static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; |
3142 | static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; |
3143 | uint32_t hash; |
3144 | |
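/*
* ICH/PCH devices have a 1024-bit multicast filter (32 MTA registers of
* 32 bits each), so their hash is 10 bits wide; all other devices have
* a 4096-bit filter (128 registers) and use a 12-bit hash.
*/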
3145 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
3146 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
3147 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) |
3148 | || (sc->sc_type == WM_T_PCH_SPT)) { |
3149 | hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | |
3150 | (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); |
3151 | return (hash & 0x3ff); |
3152 | } |
3153 | hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | |
3154 | (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); |
3155 | |
3156 | return (hash & 0xfff); |
3157 | } |
3158 | |
3159 | /* |
3160 | * wm_set_filter: |
3161 | * |
3162 | * Set up the receive filter. |
3163 | */ |
3164 | static void |
3165 | wm_set_filter(struct wm_softc *sc) |
3166 | { |
3167 | struct ethercom *ec = &sc->sc_ethercom; |
3168 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
3169 | struct ether_multi *enm; |
3170 | struct ether_multistep step; |
3171 | bus_addr_t mta_reg; |
3172 | uint32_t hash, reg, bit; |
3173 | int i, size, ralmax; |
3174 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3176 | device_xname(sc->sc_dev), __func__)); |
3177 | |
3178 | if (sc->sc_type >= WM_T_82544) |
3179 | mta_reg = WMREG_CORDOVA_MTA; |
3180 | else |
3181 | mta_reg = WMREG_MTA; |
3182 | |
3183 | sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); |
3184 | |
3185 | if (ifp->if_flags & IFF_BROADCAST) |
3186 | sc->sc_rctl |= RCTL_BAM; |
3187 | if (ifp->if_flags & IFF_PROMISC) { |
3188 | sc->sc_rctl |= RCTL_UPE; |
3189 | goto allmulti; |
3190 | } |
3191 | |
3192 | /* |
3193 | * Set the station address in the first RAL slot, and |
3194 | * clear the remaining slots. |
3195 | */ |
3196 | if (sc->sc_type == WM_T_ICH8) |
size = WM_RAL_TABSIZE_ICH8 - 1;
3198 | else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) |
3199 | || (sc->sc_type == WM_T_PCH)) |
3200 | size = WM_RAL_TABSIZE_ICH8; |
3201 | else if (sc->sc_type == WM_T_PCH2) |
3202 | size = WM_RAL_TABSIZE_PCH2; |
else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
3204 | size = WM_RAL_TABSIZE_PCH_LPT; |
3205 | else if (sc->sc_type == WM_T_82575) |
3206 | size = WM_RAL_TABSIZE_82575; |
3207 | else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) |
3208 | size = WM_RAL_TABSIZE_82576; |
3209 | else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) |
3210 | size = WM_RAL_TABSIZE_I350; |
3211 | else |
3212 | size = WM_RAL_TABSIZE; |
3213 | wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); |
3214 | |
3215 | if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) { |
3216 | i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC); |
3217 | switch (i) { |
3218 | case 0: |
3219 | /* We can use all entries */ |
3220 | ralmax = size; |
3221 | break; |
3222 | case 1: |
3223 | /* Only RAR[0] */ |
3224 | ralmax = 1; |
3225 | break; |
3226 | default: |
3227 | /* available SHRA + RAR[0] */ |
3228 | ralmax = i + 1; |
3229 | } |
3230 | } else |
3231 | ralmax = size; |
3232 | for (i = 1; i < size; i++) { |
3233 | if (i < ralmax) |
3234 | wm_set_ral(sc, NULL, i); |
3235 | } |
3236 | |
3237 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
3238 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
3239 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) |
3240 | || (sc->sc_type == WM_T_PCH_SPT)) |
3241 | size = WM_ICH8_MC_TABSIZE; |
3242 | else |
3243 | size = WM_MC_TABSIZE; |
3244 | /* Clear out the multicast table. */ |
3245 | for (i = 0; i < size; i++) |
3246 | CSR_WRITE(sc, mta_reg + (i << 2), 0); |
3247 | |
3248 | ETHER_FIRST_MULTI(step, ec, enm); |
3249 | while (enm != NULL) { |
3250 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
3251 | /* |
3252 | * We must listen to a range of multicast addresses. |
3253 | * For now, just accept all multicasts, rather than |
3254 | * trying to set only those filter bits needed to match |
3255 | * the range. (At this time, the only use of address |
3256 | * ranges is for IP multicast routing, for which the |
3257 | * range is big enough to require all bits set.) |
3258 | */ |
3259 | goto allmulti; |
3260 | } |
3261 | |
3262 | hash = wm_mchash(sc, enm->enm_addrlo); |
3263 | |
3264 | reg = (hash >> 5); |
3265 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
3266 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
3267 | || (sc->sc_type == WM_T_PCH2) |
3268 | || (sc->sc_type == WM_T_PCH_LPT) |
3269 | || (sc->sc_type == WM_T_PCH_SPT)) |
3270 | reg &= 0x1f; |
3271 | else |
3272 | reg &= 0x7f; |
3273 | bit = hash & 0x1f; |
3274 | |
3275 | hash = CSR_READ(sc, mta_reg + (reg << 2)); |
3276 | hash |= 1U << bit; |
3277 | |
3278 | if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) { |
3279 | /* |
* 82544 Errata 9: Certain registers cannot be written
3281 | * with particular alignments in PCI-X bus operation |
3282 | * (FCAH, MTA and VFTA). |
3283 | */ |
3284 | bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); |
3285 | CSR_WRITE(sc, mta_reg + (reg << 2), hash); |
3286 | CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); |
3287 | } else |
3288 | CSR_WRITE(sc, mta_reg + (reg << 2), hash); |
3289 | |
3290 | ETHER_NEXT_MULTI(step, enm); |
3291 | } |
3292 | |
3293 | ifp->if_flags &= ~IFF_ALLMULTI; |
3294 | goto setit; |
3295 | |
3296 | allmulti: |
3297 | ifp->if_flags |= IFF_ALLMULTI; |
3298 | sc->sc_rctl |= RCTL_MPE; |
3299 | |
3300 | setit: |
3301 | CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); |
3302 | } |
3303 | |
3304 | /* Reset and init related */ |
3305 | |
3306 | static void |
3307 | wm_set_vlan(struct wm_softc *sc) |
3308 | { |
3309 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3311 | device_xname(sc->sc_dev), __func__)); |
3312 | |
3313 | /* Deal with VLAN enables. */ |
3314 | if (VLAN_ATTACHED(&sc->sc_ethercom)) |
3315 | sc->sc_ctrl |= CTRL_VME; |
3316 | else |
3317 | sc->sc_ctrl &= ~CTRL_VME; |
3318 | |
3319 | /* Write the control registers. */ |
3320 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
3321 | } |
3322 | |
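/*
* wm_set_pcie_completion_timeout:
*
* If the PCIe completion timeout is still at its default of 0, set it
* to 10ms via GCR, or to 16ms via the DCSR2 config register when
* capability version 2 is present. Completion timeout resend is
* disabled in either case.
*/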
3323 | static void |
3324 | wm_set_pcie_completion_timeout(struct wm_softc *sc) |
3325 | { |
3326 | uint32_t gcr; |
3327 | pcireg_t ctrl2; |
3328 | |
3329 | gcr = CSR_READ(sc, WMREG_GCR); |
3330 | |
3331 | /* Only take action if timeout value is defaulted to 0 */ |
3332 | if ((gcr & GCR_CMPL_TMOUT_MASK) != 0) |
3333 | goto out; |
3334 | |
3335 | if ((gcr & GCR_CAP_VER2) == 0) { |
3336 | gcr |= GCR_CMPL_TMOUT_10MS; |
3337 | goto out; |
3338 | } |
3339 | |
3340 | ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
3341 | sc->sc_pcixe_capoff + PCIE_DCSR2); |
3342 | ctrl2 |= WM_PCIE_DCSR2_16MS; |
3343 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, |
3344 | sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2); |
3345 | |
3346 | out: |
3347 | /* Disable completion timeout resend */ |
3348 | gcr &= ~GCR_CMPL_TMOUT_RESEND; |
3349 | |
3350 | CSR_WRITE(sc, WMREG_GCR, gcr); |
3351 | } |
3352 | |
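/*
* wm_get_auto_rd_done:
*
* Wait (up to 10ms) for the NVM auto-read to complete after reset.
*/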
3353 | void |
3354 | wm_get_auto_rd_done(struct wm_softc *sc) |
3355 | { |
3356 | int i; |
3357 | |
3358 | /* wait for eeprom to reload */ |
3359 | switch (sc->sc_type) { |
3360 | case WM_T_82571: |
3361 | case WM_T_82572: |
3362 | case WM_T_82573: |
3363 | case WM_T_82574: |
3364 | case WM_T_82583: |
3365 | case WM_T_82575: |
3366 | case WM_T_82576: |
3367 | case WM_T_82580: |
3368 | case WM_T_I350: |
3369 | case WM_T_I354: |
3370 | case WM_T_I210: |
3371 | case WM_T_I211: |
3372 | case WM_T_80003: |
3373 | case WM_T_ICH8: |
3374 | case WM_T_ICH9: |
3375 | for (i = 0; i < 10; i++) { |
3376 | if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) |
3377 | break; |
3378 | delay(1000); |
3379 | } |
3380 | if (i == 10) { |
log(LOG_ERR, "%s: auto read from eeprom failed to "
"complete\n", device_xname(sc->sc_dev));
3383 | } |
3384 | break; |
3385 | default: |
3386 | break; |
3387 | } |
3388 | } |
3389 | |
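/*
* wm_lan_init_done:
*
* On ICH10 and newer PCH devices, wait for STATUS_LAN_INIT_DONE to be
* set after reset, then clear it.
*/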
3390 | void |
3391 | wm_lan_init_done(struct wm_softc *sc) |
3392 | { |
3393 | uint32_t reg = 0; |
3394 | int i; |
3395 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3397 | device_xname(sc->sc_dev), __func__)); |
3398 | |
3399 | /* Wait for eeprom to reload */ |
3400 | switch (sc->sc_type) { |
3401 | case WM_T_ICH10: |
3402 | case WM_T_PCH: |
3403 | case WM_T_PCH2: |
3404 | case WM_T_PCH_LPT: |
3405 | case WM_T_PCH_SPT: |
3406 | for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { |
3407 | reg = CSR_READ(sc, WMREG_STATUS); |
3408 | if ((reg & STATUS_LAN_INIT_DONE) != 0) |
3409 | break; |
3410 | delay(100); |
3411 | } |
3412 | if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { |
log(LOG_ERR, "%s: %s: lan_init_done failed to "
"complete\n", device_xname(sc->sc_dev), __func__);
3415 | } |
3416 | break; |
3417 | default: |
panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3419 | __func__); |
3420 | break; |
3421 | } |
3422 | |
3423 | reg &= ~STATUS_LAN_INIT_DONE; |
3424 | CSR_WRITE(sc, WMREG_STATUS, reg); |
3425 | } |
3426 | |
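/*
* wm_get_cfg_done:
*
* Wait until the hardware has finished loading its configuration after
* reset. The mechanism differs by chip generation: a fixed delay, the
* EEMNGCTL CFGDONE bit, or the LAN init / auto-read status.
*/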
3427 | void |
3428 | wm_get_cfg_done(struct wm_softc *sc) |
3429 | { |
3430 | int mask; |
3431 | uint32_t reg; |
3432 | int i; |
3433 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3435 | device_xname(sc->sc_dev), __func__)); |
3436 | |
3437 | /* Wait for eeprom to reload */ |
3438 | switch (sc->sc_type) { |
3439 | case WM_T_82542_2_0: |
3440 | case WM_T_82542_2_1: |
3441 | /* null */ |
3442 | break; |
3443 | case WM_T_82543: |
3444 | case WM_T_82544: |
3445 | case WM_T_82540: |
3446 | case WM_T_82545: |
3447 | case WM_T_82545_3: |
3448 | case WM_T_82546: |
3449 | case WM_T_82546_3: |
3450 | case WM_T_82541: |
3451 | case WM_T_82541_2: |
3452 | case WM_T_82547: |
3453 | case WM_T_82547_2: |
3454 | case WM_T_82573: |
3455 | case WM_T_82574: |
3456 | case WM_T_82583: |
3457 | /* generic */ |
3458 | delay(10*1000); |
3459 | break; |
3460 | case WM_T_80003: |
3461 | case WM_T_82571: |
3462 | case WM_T_82572: |
3463 | case WM_T_82575: |
3464 | case WM_T_82576: |
3465 | case WM_T_82580: |
3466 | case WM_T_I350: |
3467 | case WM_T_I354: |
3468 | case WM_T_I210: |
3469 | case WM_T_I211: |
3470 | if (sc->sc_type == WM_T_82571) { |
3471 | /* Only 82571 shares port 0 */ |
3472 | mask = EEMNGCTL_CFGDONE_0; |
3473 | } else |
3474 | mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid; |
3475 | for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) { |
3476 | if (CSR_READ(sc, WMREG_EEMNGCTL) & mask) |
3477 | break; |
3478 | delay(1000); |
3479 | } |
3480 | if (i >= WM_PHY_CFG_TIMEOUT) { |
DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3482 | device_xname(sc->sc_dev), __func__)); |
3483 | } |
3484 | break; |
3485 | case WM_T_ICH8: |
3486 | case WM_T_ICH9: |
3487 | case WM_T_ICH10: |
3488 | case WM_T_PCH: |
3489 | case WM_T_PCH2: |
3490 | case WM_T_PCH_LPT: |
3491 | case WM_T_PCH_SPT: |
3492 | delay(10*1000); |
3493 | if (sc->sc_type >= WM_T_ICH10) |
3494 | wm_lan_init_done(sc); |
3495 | else |
3496 | wm_get_auto_rd_done(sc); |
3497 | |
3498 | reg = CSR_READ(sc, WMREG_STATUS); |
3499 | if ((reg & STATUS_PHYRA) != 0) |
3500 | CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA); |
3501 | break; |
3502 | default: |
panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3504 | __func__); |
3505 | break; |
3506 | } |
3507 | } |
3508 | |
3509 | /* Init hardware bits */ |
3510 | void |
3511 | wm_initialize_hardware_bits(struct wm_softc *sc) |
3512 | { |
3513 | uint32_t tarc0, tarc1, reg; |
3514 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3516 | device_xname(sc->sc_dev), __func__)); |
3517 | |
3518 | /* For 82571 variant, 80003 and ICHs */ |
3519 | if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583)) |
3520 | || (sc->sc_type >= WM_T_80003)) { |
3521 | |
3522 | /* Transmit Descriptor Control 0 */ |
3523 | reg = CSR_READ(sc, WMREG_TXDCTL(0)); |
3524 | reg |= TXDCTL_COUNT_DESC; |
3525 | CSR_WRITE(sc, WMREG_TXDCTL(0), reg); |
3526 | |
3527 | /* Transmit Descriptor Control 1 */ |
3528 | reg = CSR_READ(sc, WMREG_TXDCTL(1)); |
3529 | reg |= TXDCTL_COUNT_DESC; |
3530 | CSR_WRITE(sc, WMREG_TXDCTL(1), reg); |
3531 | |
3532 | /* TARC0 */ |
3533 | tarc0 = CSR_READ(sc, WMREG_TARC0); |
3534 | switch (sc->sc_type) { |
3535 | case WM_T_82571: |
3536 | case WM_T_82572: |
3537 | case WM_T_82573: |
3538 | case WM_T_82574: |
3539 | case WM_T_82583: |
3540 | case WM_T_80003: |
3541 | /* Clear bits 30..27 */ |
3542 | tarc0 &= ~__BITS(30, 27); |
3543 | break; |
3544 | default: |
3545 | break; |
3546 | } |
3547 | |
3548 | switch (sc->sc_type) { |
3549 | case WM_T_82571: |
3550 | case WM_T_82572: |
3551 | tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */ |
3552 | |
3553 | tarc1 = CSR_READ(sc, WMREG_TARC1); |
3554 | tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */ |
3555 | tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */ |
3556 | /* 8257[12] Errata No.7 */ |
tarc1 |= __BIT(22); /* TARC1 bit 22 */
3558 | |
3559 | /* TARC1 bit 28 */ |
3560 | if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0) |
3561 | tarc1 &= ~__BIT(28); |
3562 | else |
3563 | tarc1 |= __BIT(28); |
3564 | CSR_WRITE(sc, WMREG_TARC1, tarc1); |
3565 | |
3566 | /* |
3567 | * 8257[12] Errata No.13 |
3568 | * Disable Dyamic Clock Gating. |
3569 | */ |
3570 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
3571 | reg &= ~CTRL_EXT_DMA_DYN_CLK; |
3572 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
3573 | break; |
3574 | case WM_T_82573: |
3575 | case WM_T_82574: |
3576 | case WM_T_82583: |
3577 | if ((sc->sc_type == WM_T_82574) |
3578 | || (sc->sc_type == WM_T_82583)) |
3579 | tarc0 |= __BIT(26); /* TARC0 bit 26 */ |
3580 | |
3581 | /* Extended Device Control */ |
3582 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
3583 | reg &= ~__BIT(23); /* Clear bit 23 */ |
3584 | reg |= __BIT(22); /* Set bit 22 */ |
3585 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
3586 | |
3587 | /* Device Control */ |
3588 | sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */ |
3589 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
3590 | |
3591 | /* PCIe Control Register */ |
3592 | /* |
3593 | * 82573 Errata (unknown). |
3594 | * |
3595 | * 82574 Errata 25 and 82583 Errata 12 |
3596 | * "Dropped Rx Packets": |
* NVM image versions 2.1.4 and newer do not have this bug.
3598 | */ |
3599 | reg = CSR_READ(sc, WMREG_GCR); |
3600 | reg |= GCR_L1_ACT_WITHOUT_L0S_RX; |
3601 | CSR_WRITE(sc, WMREG_GCR, reg); |
3602 | |
3603 | if ((sc->sc_type == WM_T_82574) |
3604 | || (sc->sc_type == WM_T_82583)) { |
3605 | /* |
* The documentation says this bit must be set for
3607 | * proper operation. |
3608 | */ |
3609 | reg = CSR_READ(sc, WMREG_GCR); |
3610 | reg |= __BIT(22); |
3611 | CSR_WRITE(sc, WMREG_GCR, reg); |
3612 | |
3613 | /* |
3614 | * Apply workaround for hardware errata |
3615 | * documented in errata docs Fixes issue where |
3616 | * some error prone or unreliable PCIe |
3617 | * completions are occurring, particularly |
3618 | * with ASPM enabled. Without fix, issue can |
3619 | * cause Tx timeouts. |
3620 | */ |
3621 | reg = CSR_READ(sc, WMREG_GCR2); |
3622 | reg |= __BIT(0); |
3623 | CSR_WRITE(sc, WMREG_GCR2, reg); |
3624 | } |
3625 | break; |
3626 | case WM_T_80003: |
3627 | /* TARC0 */ |
3628 | if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER) |
3629 | || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) |
tarc0 &= ~__BIT(20); /* Clear bit 20 */
3631 | |
3632 | /* TARC1 bit 28 */ |
3633 | tarc1 = CSR_READ(sc, WMREG_TARC1); |
3634 | if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0) |
3635 | tarc1 &= ~__BIT(28); |
3636 | else |
3637 | tarc1 |= __BIT(28); |
3638 | CSR_WRITE(sc, WMREG_TARC1, tarc1); |
3639 | break; |
3640 | case WM_T_ICH8: |
3641 | case WM_T_ICH9: |
3642 | case WM_T_ICH10: |
3643 | case WM_T_PCH: |
3644 | case WM_T_PCH2: |
3645 | case WM_T_PCH_LPT: |
3646 | case WM_T_PCH_SPT: |
3647 | /* TARC0 */ |
3648 | if ((sc->sc_type == WM_T_ICH8) |
3649 | || (sc->sc_type == WM_T_PCH_SPT)) { |
3650 | /* Set TARC0 bits 29 and 28 */ |
3651 | tarc0 |= __BITS(29, 28); |
3652 | } |
3653 | /* Set TARC0 bits 23,24,26,27 */ |
3654 | tarc0 |= __BITS(27, 26) | __BITS(24, 23); |
3655 | |
3656 | /* CTRL_EXT */ |
3657 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
3658 | reg |= __BIT(22); /* Set bit 22 */ |
3659 | /* |
3660 | * Enable PHY low-power state when MAC is at D3 |
3661 | * w/o WoL |
3662 | */ |
3663 | if (sc->sc_type >= WM_T_PCH) |
3664 | reg |= CTRL_EXT_PHYPDEN; |
3665 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
3666 | |
3667 | /* TARC1 */ |
3668 | tarc1 = CSR_READ(sc, WMREG_TARC1); |
3669 | /* bit 28 */ |
3670 | if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0) |
3671 | tarc1 &= ~__BIT(28); |
3672 | else |
3673 | tarc1 |= __BIT(28); |
3674 | tarc1 |= __BIT(24) | __BIT(26) | __BIT(30); |
3675 | CSR_WRITE(sc, WMREG_TARC1, tarc1); |
3676 | |
3677 | /* Device Status */ |
3678 | if (sc->sc_type == WM_T_ICH8) { |
3679 | reg = CSR_READ(sc, WMREG_STATUS); |
3680 | reg &= ~__BIT(31); |
3681 | CSR_WRITE(sc, WMREG_STATUS, reg); |
3682 | |
3683 | } |
3684 | |
3685 | /* IOSFPC */ |
3686 | if (sc->sc_type == WM_T_PCH_SPT) { |
3687 | reg = CSR_READ(sc, WMREG_IOSFPC); |
reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3689 | CSR_WRITE(sc, WMREG_IOSFPC, reg); |
3690 | } |
3691 | /* |
3692 | * Work-around descriptor data corruption issue during |
3693 | * NFS v2 UDP traffic, just disable the NFS filtering |
3694 | * capability. |
3695 | */ |
3696 | reg = CSR_READ(sc, WMREG_RFCTL); |
3697 | reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS; |
3698 | CSR_WRITE(sc, WMREG_RFCTL, reg); |
3699 | break; |
3700 | default: |
3701 | break; |
3702 | } |
3703 | CSR_WRITE(sc, WMREG_TARC0, tarc0); |
3704 | |
3705 | /* |
3706 | * 8257[12] Errata No.52 and some others. |
3707 | * Avoid RSS Hash Value bug. |
3708 | */ |
3709 | switch (sc->sc_type) { |
3710 | case WM_T_82571: |
3711 | case WM_T_82572: |
3712 | case WM_T_82573: |
3713 | case WM_T_80003: |
3714 | case WM_T_ICH8: |
3715 | reg = CSR_READ(sc, WMREG_RFCTL); |
reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3717 | CSR_WRITE(sc, WMREG_RFCTL, reg); |
3718 | break; |
3719 | default: |
3720 | break; |
3721 | } |
3722 | } |
3723 | } |
3724 | |
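/*
* wm_rxpbs_adjust_82580:
*
* Translate the raw RXPBS register value into a packet buffer size
* using the 82580 lookup table; out-of-range values yield 0.
*/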
3725 | static uint32_t |
3726 | wm_rxpbs_adjust_82580(uint32_t val) |
3727 | { |
3728 | uint32_t rv = 0; |
3729 | |
3730 | if (val < __arraycount(wm_82580_rxpbs_table)) |
3731 | rv = wm_82580_rxpbs_table[val]; |
3732 | |
3733 | return rv; |
3734 | } |
3735 | |
3736 | /* |
3737 | * wm_reset_phy: |
3738 | * |
* Generic PHY reset function.
3740 | * Same as e1000_phy_hw_reset_generic() |
3741 | */ |
3742 | static void |
3743 | wm_reset_phy(struct wm_softc *sc) |
3744 | { |
3745 | uint32_t reg; |
3746 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3748 | device_xname(sc->sc_dev), __func__)); |
3749 | if (wm_phy_resetisblocked(sc)) |
3750 | return; |
3751 | |
3752 | sc->phy.acquire(sc); |
3753 | |
3754 | reg = CSR_READ(sc, WMREG_CTRL); |
3755 | CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET); |
3756 | CSR_WRITE_FLUSH(sc); |
3757 | |
3758 | delay(sc->phy.reset_delay_us); |
3759 | |
3760 | CSR_WRITE(sc, WMREG_CTRL, reg); |
3761 | CSR_WRITE_FLUSH(sc); |
3762 | |
3763 | delay(150); |
3764 | |
3765 | sc->phy.release(sc); |
3766 | |
3767 | wm_get_cfg_done(sc); |
3768 | } |
3769 | |
3770 | static void |
3771 | wm_flush_desc_rings(struct wm_softc *sc) |
3772 | { |
3773 | pcireg_t preg; |
3774 | uint32_t reg; |
3775 | int nexttx; |
3776 | |
3777 | /* First, disable MULR fix in FEXTNVM11 */ |
3778 | reg = CSR_READ(sc, WMREG_FEXTNVM11); |
3779 | reg |= FEXTNVM11_DIS_MULRFIX; |
3780 | CSR_WRITE(sc, WMREG_FEXTNVM11, reg); |
3781 | |
3782 | preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS); |
3783 | reg = CSR_READ(sc, WMREG_TDLEN(0)); |
3784 | if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) { |
3785 | struct wm_txqueue *txq; |
3786 | wiseman_txdesc_t *txd; |
3787 | |
3788 | /* TX */ |
printf("%s: Need TX flush (reg = %08x, len = %u)\n",
3790 | device_xname(sc->sc_dev), preg, reg); |
3791 | reg = CSR_READ(sc, WMREG_TCTL); |
3792 | CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN); |
3793 | |
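/*
* Queue a single dummy 512-byte descriptor and advance the tail
* pointer so the hardware can drain the stuck ring.
*/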
3794 | txq = &sc->sc_queue[0].wmq_txq; |
3795 | nexttx = txq->txq_next; |
3796 | txd = &txq->txq_descs[nexttx]; |
3797 | wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx)); |
txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
3799 | txd->wtx_fields.wtxu_status = 0; |
3800 | txd->wtx_fields.wtxu_options = 0; |
3801 | txd->wtx_fields.wtxu_vlan = 0; |
3802 | |
3803 | bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, |
3804 | BUS_SPACE_BARRIER_WRITE); |
3805 | |
3806 | txq->txq_next = WM_NEXTTX(txq, txq->txq_next); |
3807 | CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next); |
3808 | bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, |
3809 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); |
3810 | delay(250); |
3811 | } |
3812 | preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS); |
3813 | if (preg & DESCRING_STATUS_FLUSH_REQ) { |
3814 | uint32_t rctl; |
3815 | |
3816 | /* RX */ |
printf("%s: Need RX flush (reg = %08x)\n",
3818 | device_xname(sc->sc_dev), preg); |
3819 | rctl = CSR_READ(sc, WMREG_RCTL); |
3820 | CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); |
3821 | CSR_WRITE_FLUSH(sc); |
3822 | delay(150); |
3823 | |
3824 | reg = CSR_READ(sc, WMREG_RXDCTL(0)); |
3825 | /* zero the lower 14 bits (prefetch and host thresholds) */ |
3826 | reg &= 0xffffc000; |
3827 | /* |
3828 | * update thresholds: prefetch threshold to 31, host threshold |
3829 | * to 1 and make sure the granularity is "descriptors" and not |
3830 | * "cache lines" |
3831 | */ |
3832 | reg |= (0x1f | (1 << 8) | RXDCTL_GRAN); |
3833 | CSR_WRITE(sc, WMREG_RXDCTL(0), reg); |
3834 | |
3835 | /* |
3836 | * momentarily enable the RX ring for the changes to take |
3837 | * effect |
3838 | */ |
3839 | CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN); |
3840 | CSR_WRITE_FLUSH(sc); |
3841 | delay(150); |
3842 | CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); |
3843 | } |
3844 | } |
3845 | |
3846 | /* |
3847 | * wm_reset: |
3848 | * |
3849 | * Reset the i82542 chip. |
3850 | */ |
3851 | static void |
3852 | wm_reset(struct wm_softc *sc) |
3853 | { |
3854 | int phy_reset = 0; |
3855 | int i, error = 0; |
3856 | uint32_t reg; |
3857 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3859 | device_xname(sc->sc_dev), __func__)); |
3860 | KASSERT(sc->sc_type != 0); |
3861 | |
3862 | /* |
3863 | * Allocate on-chip memory according to the MTU size. |
3864 | * The Packet Buffer Allocation register must be written |
3865 | * before the chip is reset. |
3866 | */ |
3867 | switch (sc->sc_type) { |
3868 | case WM_T_82547: |
3869 | case WM_T_82547_2: |
3870 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? |
3871 | PBA_22K : PBA_30K; |
3872 | for (i = 0; i < sc->sc_nqueues; i++) { |
3873 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
3874 | txq->txq_fifo_head = 0; |
3875 | txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; |
3876 | txq->txq_fifo_size = |
3877 | (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; |
3878 | txq->txq_fifo_stall = 0; |
3879 | } |
3880 | break; |
3881 | case WM_T_82571: |
3882 | case WM_T_82572: |
case WM_T_82575: /* XXX need special handling for jumbo frames */
3884 | case WM_T_80003: |
3885 | sc->sc_pba = PBA_32K; |
3886 | break; |
3887 | case WM_T_82573: |
3888 | sc->sc_pba = PBA_12K; |
3889 | break; |
3890 | case WM_T_82574: |
3891 | case WM_T_82583: |
3892 | sc->sc_pba = PBA_20K; |
3893 | break; |
3894 | case WM_T_82576: |
3895 | sc->sc_pba = CSR_READ(sc, WMREG_RXPBS); |
3896 | sc->sc_pba &= RXPBS_SIZE_MASK_82576; |
3897 | break; |
3898 | case WM_T_82580: |
3899 | case WM_T_I350: |
3900 | case WM_T_I354: |
3901 | sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS)); |
3902 | break; |
3903 | case WM_T_I210: |
3904 | case WM_T_I211: |
3905 | sc->sc_pba = PBA_34K; |
3906 | break; |
3907 | case WM_T_ICH8: |
3908 | /* Workaround for a bit corruption issue in FIFO memory */ |
3909 | sc->sc_pba = PBA_8K; |
3910 | CSR_WRITE(sc, WMREG_PBS, PBA_16K); |
3911 | break; |
3912 | case WM_T_ICH9: |
3913 | case WM_T_ICH10: |
3914 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ? |
3915 | PBA_14K : PBA_10K; |
3916 | break; |
3917 | case WM_T_PCH: |
3918 | case WM_T_PCH2: |
3919 | case WM_T_PCH_LPT: |
3920 | case WM_T_PCH_SPT: |
3921 | sc->sc_pba = PBA_26K; |
3922 | break; |
3923 | default: |
3924 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? |
3925 | PBA_40K : PBA_48K; |
3926 | break; |
3927 | } |
3928 | /* |
3929 | * Only old or non-multiqueue devices have the PBA register |
3930 | * XXX Need special handling for 82575. |
3931 | */ |
3932 | if (((sc->sc_flags & WM_F_NEWQUEUE) == 0) |
3933 | || (sc->sc_type == WM_T_82575)) |
3934 | CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); |
3935 | |
3936 | /* Prevent the PCI-E bus from sticking */ |
3937 | if (sc->sc_flags & WM_F_PCIE) { |
3938 | int timeout = 800; |
3939 | |
3940 | sc->sc_ctrl |= CTRL_GIO_M_DIS; |
3941 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
3942 | |
3943 | while (timeout--) { |
3944 | if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) |
3945 | == 0) |
3946 | break; |
3947 | delay(100); |
3948 | } |
3949 | } |
3950 | |
/* Set the completion timeout for the interface */
3952 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
3953 | || (sc->sc_type == WM_T_82580) |
3954 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
3955 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) |
3956 | wm_set_pcie_completion_timeout(sc); |
3957 | |
3958 | /* Clear interrupt */ |
3959 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
3960 | if (sc->sc_nintrs > 1) { |
3961 | if (sc->sc_type != WM_T_82574) { |
3962 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); |
3963 | CSR_WRITE(sc, WMREG_EIAC, 0); |
3964 | } else { |
3965 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); |
3966 | } |
3967 | } |
3968 | |
3969 | /* Stop the transmit and receive processes. */ |
3970 | CSR_WRITE(sc, WMREG_RCTL, 0); |
3971 | sc->sc_rctl &= ~RCTL_EN; |
3972 | CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); |
3973 | CSR_WRITE_FLUSH(sc); |
3974 | |
3975 | /* XXX set_tbi_sbp_82543() */ |
3976 | |
3977 | delay(10*1000); |
3978 | |
3979 | /* Must acquire the MDIO ownership before MAC reset */ |
3980 | switch (sc->sc_type) { |
3981 | case WM_T_82573: |
3982 | case WM_T_82574: |
3983 | case WM_T_82583: |
3984 | error = wm_get_hw_semaphore_82573(sc); |
3985 | break; |
3986 | default: |
3987 | break; |
3988 | } |
3989 | |
3990 | /* |
3991 | * 82541 Errata 29? & 82547 Errata 28? |
3992 | * See also the description about PHY_RST bit in CTRL register |
3993 | * in 8254x_GBe_SDM.pdf. |
3994 | */ |
3995 | if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { |
3996 | CSR_WRITE(sc, WMREG_CTRL, |
3997 | CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); |
3998 | CSR_WRITE_FLUSH(sc); |
3999 | delay(5000); |
4000 | } |
4001 | |
4002 | switch (sc->sc_type) { |
4003 | case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ |
4004 | case WM_T_82541: |
4005 | case WM_T_82541_2: |
4006 | case WM_T_82547: |
4007 | case WM_T_82547_2: |
4008 | /* |
4009 | * On some chipsets, a reset through a memory-mapped write |
4010 | * cycle can cause the chip to reset before completing the |
* write cycle. This causes a major headache that can be
4012 | * avoided by issuing the reset via indirect register writes |
4013 | * through I/O space. |
4014 | * |
4015 | * So, if we successfully mapped the I/O BAR at attach time, |
4016 | * use that. Otherwise, try our luck with a memory-mapped |
4017 | * reset. |
4018 | */ |
4019 | if (sc->sc_flags & WM_F_IOH_VALID) |
4020 | wm_io_write(sc, WMREG_CTRL, CTRL_RST); |
4021 | else |
4022 | CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); |
4023 | break; |
4024 | case WM_T_82545_3: |
4025 | case WM_T_82546_3: |
4026 | /* Use the shadow control register on these chips. */ |
4027 | CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); |
4028 | break; |
4029 | case WM_T_80003: |
4030 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; |
4031 | sc->phy.acquire(sc); |
4032 | CSR_WRITE(sc, WMREG_CTRL, reg); |
4033 | sc->phy.release(sc); |
4034 | break; |
4035 | case WM_T_ICH8: |
4036 | case WM_T_ICH9: |
4037 | case WM_T_ICH10: |
4038 | case WM_T_PCH: |
4039 | case WM_T_PCH2: |
4040 | case WM_T_PCH_LPT: |
4041 | case WM_T_PCH_SPT: |
4042 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; |
4043 | if (wm_phy_resetisblocked(sc) == false) { |
4044 | /* |
4045 | * Gate automatic PHY configuration by hardware on |
4046 | * non-managed 82579 |
4047 | */ |
4048 | if ((sc->sc_type == WM_T_PCH2) |
4049 | && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) |
4050 | == 0)) |
4051 | wm_gate_hw_phy_config_ich8lan(sc, true); |
4052 | |
4053 | reg |= CTRL_PHY_RESET; |
4054 | phy_reset = 1; |
4055 | } else |
printf("XXX reset is blocked!!!\n");
4057 | sc->phy.acquire(sc); |
4058 | CSR_WRITE(sc, WMREG_CTRL, reg); |
/* Don't insert a completion barrier during reset */
4060 | delay(20*1000); |
4061 | mutex_exit(sc->sc_ich_phymtx); |
4062 | break; |
4063 | case WM_T_82580: |
4064 | case WM_T_I350: |
4065 | case WM_T_I354: |
4066 | case WM_T_I210: |
4067 | case WM_T_I211: |
4068 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); |
4069 | if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII) |
4070 | CSR_WRITE_FLUSH(sc); |
4071 | delay(5000); |
4072 | break; |
4073 | case WM_T_82542_2_0: |
4074 | case WM_T_82542_2_1: |
4075 | case WM_T_82543: |
4076 | case WM_T_82540: |
4077 | case WM_T_82545: |
4078 | case WM_T_82546: |
4079 | case WM_T_82571: |
4080 | case WM_T_82572: |
4081 | case WM_T_82573: |
4082 | case WM_T_82574: |
4083 | case WM_T_82575: |
4084 | case WM_T_82576: |
4085 | case WM_T_82583: |
4086 | default: |
4087 | /* Everything else can safely use the documented method. */ |
4088 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); |
4089 | break; |
4090 | } |
4091 | |
4092 | /* Must release the MDIO ownership after MAC reset */ |
4093 | switch (sc->sc_type) { |
4094 | case WM_T_82573: |
4095 | case WM_T_82574: |
4096 | case WM_T_82583: |
4097 | if (error == 0) |
4098 | wm_put_hw_semaphore_82573(sc); |
4099 | break; |
4100 | default: |
4101 | break; |
4102 | } |
4103 | |
4104 | if (phy_reset != 0) |
4105 | wm_get_cfg_done(sc); |
4106 | |
4107 | /* reload EEPROM */ |
4108 | switch (sc->sc_type) { |
4109 | case WM_T_82542_2_0: |
4110 | case WM_T_82542_2_1: |
4111 | case WM_T_82543: |
4112 | case WM_T_82544: |
4113 | delay(10); |
4114 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; |
4115 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4116 | CSR_WRITE_FLUSH(sc); |
4117 | delay(2000); |
4118 | break; |
4119 | case WM_T_82540: |
4120 | case WM_T_82545: |
4121 | case WM_T_82545_3: |
4122 | case WM_T_82546: |
4123 | case WM_T_82546_3: |
4124 | delay(5*1000); |
4125 | /* XXX Disable HW ARPs on ASF enabled adapters */ |
4126 | break; |
4127 | case WM_T_82541: |
4128 | case WM_T_82541_2: |
4129 | case WM_T_82547: |
4130 | case WM_T_82547_2: |
4131 | delay(20000); |
4132 | /* XXX Disable HW ARPs on ASF enabled adapters */ |
4133 | break; |
4134 | case WM_T_82571: |
4135 | case WM_T_82572: |
4136 | case WM_T_82573: |
4137 | case WM_T_82574: |
4138 | case WM_T_82583: |
4139 | if (sc->sc_flags & WM_F_EEPROM_FLASH) { |
4140 | delay(10); |
4141 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; |
4142 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4143 | CSR_WRITE_FLUSH(sc); |
4144 | } |
4145 | /* check EECD_EE_AUTORD */ |
4146 | wm_get_auto_rd_done(sc); |
4147 | /* |
* PHY configuration from NVM starts right after EECD_AUTO_RD
4149 | * is set. |
4150 | */ |
4151 | if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) |
4152 | || (sc->sc_type == WM_T_82583)) |
4153 | delay(25*1000); |
4154 | break; |
4155 | case WM_T_82575: |
4156 | case WM_T_82576: |
4157 | case WM_T_82580: |
4158 | case WM_T_I350: |
4159 | case WM_T_I354: |
4160 | case WM_T_I210: |
4161 | case WM_T_I211: |
4162 | case WM_T_80003: |
4163 | /* check EECD_EE_AUTORD */ |
4164 | wm_get_auto_rd_done(sc); |
4165 | break; |
4166 | case WM_T_ICH8: |
4167 | case WM_T_ICH9: |
4168 | case WM_T_ICH10: |
4169 | case WM_T_PCH: |
4170 | case WM_T_PCH2: |
4171 | case WM_T_PCH_LPT: |
4172 | case WM_T_PCH_SPT: |
4173 | break; |
4174 | default: |
panic("%s: unknown type\n", __func__);
4176 | } |
4177 | |
4178 | /* Check whether EEPROM is present or not */ |
4179 | switch (sc->sc_type) { |
4180 | case WM_T_82575: |
4181 | case WM_T_82576: |
4182 | case WM_T_82580: |
4183 | case WM_T_I350: |
4184 | case WM_T_I354: |
4185 | case WM_T_ICH8: |
4186 | case WM_T_ICH9: |
4187 | if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { |
4188 | /* Not found */ |
4189 | sc->sc_flags |= WM_F_EEPROM_INVALID; |
4190 | if (sc->sc_type == WM_T_82575) |
4191 | wm_reset_init_script_82575(sc); |
4192 | } |
4193 | break; |
4194 | default: |
4195 | break; |
4196 | } |
4197 | |
4198 | if ((sc->sc_type == WM_T_82580) |
4199 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { |
4200 | /* clear global device reset status bit */ |
4201 | CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); |
4202 | } |
4203 | |
4204 | /* Clear any pending interrupt events. */ |
4205 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
4206 | reg = CSR_READ(sc, WMREG_ICR); |
4207 | if (sc->sc_nintrs > 1) { |
4208 | if (sc->sc_type != WM_T_82574) { |
4209 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); |
4210 | CSR_WRITE(sc, WMREG_EIAC, 0); |
4211 | } else |
4212 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); |
4213 | } |
4214 | |
4215 | /* reload sc_ctrl */ |
4216 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
4217 | |
4218 | if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) |
4219 | wm_set_eee_i350(sc); |
4220 | |
/* Clear the host wakeup bit after LCD reset */
4222 | if (sc->sc_type >= WM_T_PCH) { |
4223 | reg = wm_gmii_hv_readreg(sc->sc_dev, 2, |
4224 | BM_PORT_GEN_CFG); |
4225 | reg &= ~BM_WUC_HOST_WU_BIT; |
4226 | wm_gmii_hv_writereg(sc->sc_dev, 2, |
4227 | BM_PORT_GEN_CFG, reg); |
4228 | } |
4229 | |
4230 | /* |
4231 | * For PCH, this write will make sure that any noise will be detected |
4232 | * as a CRC error and be dropped rather than show up as a bad packet |
4233 | * to the DMA engine |
4234 | */ |
4235 | if (sc->sc_type == WM_T_PCH) |
4236 | CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); |
4237 | |
4238 | if (sc->sc_type >= WM_T_82544) |
4239 | CSR_WRITE(sc, WMREG_WUC, 0); |
4240 | |
4241 | wm_reset_mdicnfg_82580(sc); |
4242 | |
4243 | if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) |
4244 | wm_pll_workaround_i210(sc); |
4245 | } |
4246 | |
4247 | /* |
4248 | * wm_add_rxbuf: |
4249 | * |
* Add a receive buffer to the indicated descriptor.
4251 | */ |
4252 | static int |
4253 | wm_add_rxbuf(struct wm_rxqueue *rxq, int idx) |
4254 | { |
4255 | struct wm_softc *sc = rxq->rxq_sc; |
4256 | struct wm_rxsoft *rxs = &rxq->rxq_soft[idx]; |
4257 | struct mbuf *m; |
4258 | int error; |
4259 | |
4260 | KASSERT(mutex_owned(rxq->rxq_lock)); |
4261 | |
4262 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
4263 | if (m == NULL) |
4264 | return ENOBUFS; |
4265 | |
4266 | MCLGET(m, M_DONTWAIT); |
4267 | if ((m->m_flags & M_EXT) == 0) { |
4268 | m_freem(m); |
4269 | return ENOBUFS; |
4270 | } |
4271 | |
4272 | if (rxs->rxs_mbuf != NULL) |
4273 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
4274 | |
4275 | rxs->rxs_mbuf = m; |
4276 | |
4277 | m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; |
4278 | error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, |
4279 | BUS_DMA_READ | BUS_DMA_NOWAIT); |
4280 | if (error) { |
4281 | /* XXX XXX XXX */ |
aprint_error_dev(sc->sc_dev,
"unable to load rx DMA map %d, error = %d\n",
4284 | idx, error); |
panic("wm_add_rxbuf");
4286 | } |
4287 | |
4288 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
4289 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
4290 | |
4291 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4292 | if ((sc->sc_rctl & RCTL_EN) != 0) |
4293 | wm_init_rxdesc(rxq, idx); |
4294 | } else |
4295 | wm_init_rxdesc(rxq, idx); |
4296 | |
4297 | return 0; |
4298 | } |
4299 | |
4300 | /* |
4301 | * wm_rxdrain: |
4302 | * |
4303 | * Drain the receive queue. |
4304 | */ |
4305 | static void |
4306 | wm_rxdrain(struct wm_rxqueue *rxq) |
4307 | { |
4308 | struct wm_softc *sc = rxq->rxq_sc; |
4309 | struct wm_rxsoft *rxs; |
4310 | int i; |
4311 | |
4312 | KASSERT(mutex_owned(rxq->rxq_lock)); |
4313 | |
4314 | for (i = 0; i < WM_NRXDESC; i++) { |
4315 | rxs = &rxq->rxq_soft[i]; |
4316 | if (rxs->rxs_mbuf != NULL) { |
4317 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
4318 | m_freem(rxs->rxs_mbuf); |
4319 | rxs->rxs_mbuf = NULL; |
4320 | } |
4321 | } |
4322 | } |
4323 | |
4324 | |
4325 | /* |
4326 | * XXX copy from FreeBSD's sys/net/rss_config.c |
4327 | */ |
4328 | /* |
4329 | * RSS secret key, intended to prevent attacks on load-balancing. Its |
4330 | * effectiveness may be limited by algorithm choice and available entropy |
4331 | * during the boot. |
4332 | * |
4333 | * XXXRW: And that we don't randomize it yet! |
4334 | * |
4335 | * This is the default Microsoft RSS specification key which is also |
4336 | * the Chelsio T5 firmware default key. |
4337 | */ |
#define RSS_KEYSIZE 40
static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4340 | 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, |
4341 | 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, |
4342 | 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, |
4343 | 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, |
4344 | 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa, |
4345 | }; |
4346 | |
4347 | /* |
4348 | * Caller must pass an array of size sizeof(rss_key). |
4349 | * |
4350 | * XXX |
* As if_ixgbe may use this function, it should not be an
* if_wm-specific function.
4353 | */ |
4354 | static void |
wm_rss_getkey(uint8_t *key)
4356 | { |
4357 | |
4358 | memcpy(key, wm_rss_key, sizeof(wm_rss_key)); |
4359 | } |
4360 | |
4361 | /* |
4362 | * Setup registers for RSS. |
4363 | * |
* XXX VMDq is not yet supported.
4365 | */ |
4366 | static void |
wm_init_rss(struct wm_softc *sc)
4368 | { |
uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4370 | int i; |
4371 | |
4372 | CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key)); |
4373 | |
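/*
* Fill the redirection table round-robin so received flows are spread
* evenly across all enabled queues.
*/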
4374 | for (i = 0; i < RETA_NUM_ENTRIES; i++) { |
4375 | int qid, reta_ent; |
4376 | |
4377 | qid = i % sc->sc_nqueues; |
switch (sc->sc_type) {
4379 | case WM_T_82574: |
4380 | reta_ent = __SHIFTIN(qid, |
4381 | RETA_ENT_QINDEX_MASK_82574); |
4382 | break; |
4383 | case WM_T_82575: |
4384 | reta_ent = __SHIFTIN(qid, |
4385 | RETA_ENT_QINDEX1_MASK_82575); |
4386 | break; |
4387 | default: |
4388 | reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK); |
4389 | break; |
4390 | } |
4391 | |
4392 | reta_reg = CSR_READ(sc, WMREG_RETA_Q(i)); |
4393 | reta_reg &= ~RETA_ENTRY_MASK_Q(i); |
4394 | reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i)); |
4395 | CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg); |
4396 | } |
4397 | |
4398 | wm_rss_getkey((uint8_t *)rss_key); |
4399 | for (i = 0; i < RSSRK_NUM_REGS; i++) |
4400 | CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]); |
4401 | |
4402 | if (sc->sc_type == WM_T_82574) |
4403 | mrqc = MRQC_ENABLE_RSS_MQ_82574; |
4404 | else |
4405 | mrqc = MRQC_ENABLE_RSS_MQ; |
4406 | |
4407 | /* XXXX |
4408 | * The same as FreeBSD igb. |
* Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4410 | */ |
4411 | mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP); |
4412 | mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP); |
4413 | mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP); |
4414 | mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX); |
4415 | |
4416 | CSR_WRITE(sc, WMREG_MRQC, mrqc); |
4417 | } |
4418 | |
4419 | /* |
4420 | * Adjust TX and RX queue numbers which the system actulally uses. |
4421 | * |
4422 | * The numbers are affected by below parameters. |
4423 | * - The nubmer of hardware queues |
4424 | * - The number of MSI-X vectors (= "nvectors" argument) |
4425 | * - ncpu |
4426 | */ |
4427 | static void |
4428 | wm_adjust_qnum(struct wm_softc *sc, int nvectors) |
4429 | { |
4430 | int hw_ntxqueues, hw_nrxqueues, hw_nqueues; |
4431 | |
4432 | if (nvectors < 2) { |
4433 | sc->sc_nqueues = 1; |
4434 | return; |
4435 | } |
4436 | |
switch (sc->sc_type) {
4438 | case WM_T_82572: |
4439 | hw_ntxqueues = 2; |
4440 | hw_nrxqueues = 2; |
4441 | break; |
4442 | case WM_T_82574: |
4443 | hw_ntxqueues = 2; |
4444 | hw_nrxqueues = 2; |
4445 | break; |
4446 | case WM_T_82575: |
4447 | hw_ntxqueues = 4; |
4448 | hw_nrxqueues = 4; |
4449 | break; |
4450 | case WM_T_82576: |
4451 | hw_ntxqueues = 16; |
4452 | hw_nrxqueues = 16; |
4453 | break; |
4454 | case WM_T_82580: |
4455 | case WM_T_I350: |
4456 | case WM_T_I354: |
4457 | hw_ntxqueues = 8; |
4458 | hw_nrxqueues = 8; |
4459 | break; |
4460 | case WM_T_I210: |
4461 | hw_ntxqueues = 4; |
4462 | hw_nrxqueues = 4; |
4463 | break; |
4464 | case WM_T_I211: |
4465 | hw_ntxqueues = 2; |
4466 | hw_nrxqueues = 2; |
4467 | break; |
4468 | /* |
4469 | * As below ethernet controllers does not support MSI-X, |
4470 | * this driver let them not use multiqueue. |
4471 | * - WM_T_80003 |
4472 | * - WM_T_ICH8 |
4473 | * - WM_T_ICH9 |
4474 | * - WM_T_ICH10 |
4475 | * - WM_T_PCH |
4476 | * - WM_T_PCH2 |
4477 | * - WM_T_PCH_LPT |
4478 | */ |
4479 | default: |
4480 | hw_ntxqueues = 1; |
4481 | hw_nrxqueues = 1; |
4482 | break; |
4483 | } |
4484 | |
4485 | hw_nqueues = min(hw_ntxqueues, hw_nrxqueues); |
4486 | |
4487 | /* |
4488 | * As queues more than MSI-X vectors cannot improve scaling, we limit |
4489 | * the number of queues used actually. |
4490 | */ |
4491 | if (nvectors < hw_nqueues + 1) { |
4492 | sc->sc_nqueues = nvectors - 1; |
4493 | } else { |
4494 | sc->sc_nqueues = hw_nqueues; |
4495 | } |
4496 | |
4497 | /* |
4498 | * As queues more then cpus cannot improve scaling, we limit |
4499 | * the number of queues used actually. |
4500 | */ |
4501 | if (ncpu < sc->sc_nqueues) |
4502 | sc->sc_nqueues = ncpu; |
4503 | } |
4504 | |
4505 | /* |
* Both single-interrupt MSI and INTx can use this function.
4507 | */ |
4508 | static int |
4509 | wm_setup_legacy(struct wm_softc *sc) |
4510 | { |
4511 | pci_chipset_tag_t pc = sc->sc_pc; |
4512 | const char *intrstr = NULL; |
4513 | char intrbuf[PCI_INTRSTR_LEN]; |
4514 | int error; |
4515 | |
4516 | error = wm_alloc_txrx_queues(sc); |
4517 | if (error) { |
aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4519 | error); |
4520 | return ENOMEM; |
4521 | } |
4522 | intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf, |
4523 | sizeof(intrbuf)); |
4524 | #ifdef WM_MPSAFE |
4525 | pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true); |
4526 | #endif |
4527 | sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0], |
4528 | IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev)); |
4529 | if (sc->sc_ihs[0] == NULL) { |
aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
(pci_intr_type(pc, sc->sc_intrs[0])
== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4533 | return ENOMEM; |
4534 | } |
4535 | |
aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4537 | sc->sc_nintrs = 1; |
4538 | return 0; |
4539 | } |
4540 | |
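/*
* wm_setup_msix:
*
* Allocate the queues and establish the MSI-X interrupts: one TXRX
* vector per queue, distributed round-robin across CPUs, plus one LINK
* vector that keeps the default affinity.
*/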
4541 | static int |
4542 | wm_setup_msix(struct wm_softc *sc) |
4543 | { |
4544 | void *vih; |
4545 | kcpuset_t *affinity; |
4546 | int qidx, error, intr_idx, txrx_established; |
4547 | pci_chipset_tag_t pc = sc->sc_pc; |
4548 | const char *intrstr = NULL; |
4549 | char intrbuf[PCI_INTRSTR_LEN]; |
4550 | char intr_xname[INTRDEVNAMEBUF]; |
4551 | |
4552 | if (sc->sc_nqueues < ncpu) { |
4553 | /* |
4554 | * To avoid other devices' interrupts, the affinity of Tx/Rx |
* interrupts starts from CPU#1.
4556 | */ |
4557 | sc->sc_affinity_offset = 1; |
4558 | } else { |
4559 | /* |
* In this case, this device uses all CPUs, so we unify the
* affinitized cpu_index with the MSI-X vector number for readability.
4562 | */ |
4563 | sc->sc_affinity_offset = 0; |
4564 | } |
4565 | |
4566 | error = wm_alloc_txrx_queues(sc); |
4567 | if (error) { |
aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4569 | error); |
4570 | return ENOMEM; |
4571 | } |
4572 | |
4573 | kcpuset_create(&affinity, false); |
4574 | intr_idx = 0; |
4575 | |
4576 | /* |
4577 | * TX and RX |
4578 | */ |
4579 | txrx_established = 0; |
4580 | for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { |
4581 | struct wm_queue *wmq = &sc->sc_queue[qidx]; |
4582 | int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu; |
4583 | |
4584 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, |
4585 | sizeof(intrbuf)); |
4586 | #ifdef WM_MPSAFE |
4587 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], |
4588 | PCI_INTR_MPSAFE, true); |
4589 | #endif |
4590 | memset(intr_xname, 0, sizeof(intr_xname)); |
snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4592 | device_xname(sc->sc_dev), qidx); |
4593 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], |
4594 | IPL_NET, wm_txrxintr_msix, wmq, intr_xname); |
4595 | if (vih == NULL) { |
aprint_error_dev(sc->sc_dev,
"unable to establish MSI-X(for TX and RX)%s%s\n",
intrstr ? " at " : "",
intrstr ? intrstr : "");
4600 | |
4601 | goto fail; |
4602 | } |
4603 | kcpuset_zero(affinity); |
4604 | /* Round-robin affinity */ |
4605 | kcpuset_set(affinity, affinity_to); |
4606 | error = interrupt_distribute(vih, affinity, NULL); |
4607 | if (error == 0) { |
aprint_normal_dev(sc->sc_dev,
"for TX and RX interrupting at %s affinity to %u\n",
4610 | intrstr, affinity_to); |
4611 | } else { |
aprint_normal_dev(sc->sc_dev,
"for TX and RX interrupting at %s\n", intrstr);
4614 | } |
4615 | sc->sc_ihs[intr_idx] = vih; |
wmq->wmq_id = qidx;
4617 | wmq->wmq_intr_idx = intr_idx; |
4618 | |
4619 | txrx_established++; |
4620 | intr_idx++; |
4621 | } |
4622 | |
4623 | /* |
4624 | * LINK |
4625 | */ |
4626 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, |
4627 | sizeof(intrbuf)); |
4628 | #ifdef WM_MPSAFE |
4629 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true); |
4630 | #endif |
4631 | memset(intr_xname, 0, sizeof(intr_xname)); |
snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4633 | device_xname(sc->sc_dev)); |
4634 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], |
4635 | IPL_NET, wm_linkintr_msix, sc, intr_xname); |
4636 | if (vih == NULL) { |
aprint_error_dev(sc->sc_dev,
"unable to establish MSI-X(for LINK)%s%s\n",
intrstr ? " at " : "",
intrstr ? intrstr : "");
4641 | |
4642 | goto fail; |
4643 | } |
/* Keep the default affinity for the LINK interrupt */
aprint_normal_dev(sc->sc_dev,
"for LINK interrupting at %s\n", intrstr);
4647 | sc->sc_ihs[intr_idx] = vih; |
4648 | sc->sc_link_intr_idx = intr_idx; |
4649 | |
4650 | sc->sc_nintrs = sc->sc_nqueues + 1; |
4651 | kcpuset_destroy(affinity); |
4652 | return 0; |
4653 | |
4654 | fail: |
4655 | for (qidx = 0; qidx < txrx_established; qidx++) { |
4656 | struct wm_queue *wmq = &sc->sc_queue[qidx]; |
pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
4658 | sc->sc_ihs[wmq->wmq_intr_idx] = NULL; |
4659 | } |
4660 | |
4661 | kcpuset_destroy(affinity); |
4662 | return ENOMEM; |
4663 | } |
4664 | |
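/*
* wm_turnon:
*
* Clear the stopping flags of the core and of every TX/RX queue so the
* data paths and the tick callout run again.
*/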
4665 | static void |
4666 | wm_turnon(struct wm_softc *sc) |
4667 | { |
4668 | int i; |
4669 | |
4670 | KASSERT(WM_CORE_LOCKED(sc)); |
4671 | |
for (i = 0; i < sc->sc_nqueues; i++) {
4673 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
4674 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
4675 | |
4676 | mutex_enter(txq->txq_lock); |
4677 | txq->txq_stopping = false; |
4678 | mutex_exit(txq->txq_lock); |
4679 | |
4680 | mutex_enter(rxq->rxq_lock); |
4681 | rxq->rxq_stopping = false; |
4682 | mutex_exit(rxq->rxq_lock); |
4683 | } |
4684 | |
4685 | sc->sc_core_stopping = false; |
4686 | } |
4687 | |
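/*
* wm_turnoff:
*
* Set the stopping flags of the core and of every TX/RX queue, taking
* each queue lock in turn.
*/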
4688 | static void |
4689 | wm_turnoff(struct wm_softc *sc) |
4690 | { |
4691 | int i; |
4692 | |
4693 | KASSERT(WM_CORE_LOCKED(sc)); |
4694 | |
4695 | sc->sc_core_stopping = true; |
4696 | |
for (i = 0; i < sc->sc_nqueues; i++) {
4698 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
4699 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
4700 | |
4701 | mutex_enter(rxq->rxq_lock); |
4702 | rxq->rxq_stopping = true; |
4703 | mutex_exit(rxq->rxq_lock); |
4704 | |
4705 | mutex_enter(txq->txq_lock); |
4706 | txq->txq_stopping = true; |
4707 | mutex_exit(txq->txq_lock); |
4708 | } |
4709 | } |
4710 | |
4711 | /* |
4712 | * wm_init: [ifnet interface function] |
4713 | * |
4714 | * Initialize the interface. |
4715 | */ |
4716 | static int |
4717 | wm_init(struct ifnet *ifp) |
4718 | { |
4719 | struct wm_softc *sc = ifp->if_softc; |
4720 | int ret; |
4721 | |
4722 | WM_CORE_LOCK(sc); |
4723 | ret = wm_init_locked(ifp); |
4724 | WM_CORE_UNLOCK(sc); |
4725 | |
4726 | return ret; |
4727 | } |
4728 | |
4729 | static int |
4730 | wm_init_locked(struct ifnet *ifp) |
4731 | { |
4732 | struct wm_softc *sc = ifp->if_softc; |
4733 | int i, j, trynum, error = 0; |
4734 | uint32_t reg; |
4735 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4737 | device_xname(sc->sc_dev), __func__)); |
4738 | KASSERT(WM_CORE_LOCKED(sc)); |
4739 | |
4740 | /* |
* *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
* There is a small but measurable benefit to avoiding the adjustment
4743 | * of the descriptor so that the headers are aligned, for normal mtu, |
4744 | * on such platforms. One possibility is that the DMA itself is |
4745 | * slightly more efficient if the front of the entire packet (instead |
4746 | * of the front of the headers) is aligned. |
4747 | * |
4748 | * Note we must always set align_tweak to 0 if we are using |
4749 | * jumbo frames. |
4750 | */ |
4751 | #ifdef __NO_STRICT_ALIGNMENT |
4752 | sc->sc_align_tweak = 0; |
4753 | #else |
4754 | if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) |
4755 | sc->sc_align_tweak = 0; |
4756 | else |
4757 | sc->sc_align_tweak = 2; |
4758 | #endif /* __NO_STRICT_ALIGNMENT */ |
4759 | |
4760 | /* Cancel any pending I/O. */ |
4761 | wm_stop_locked(ifp, 0); |
4762 | |
4763 | /* update statistics before reset */ |
4764 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); |
4765 | ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); |
4766 | |
4767 | /* PCH_SPT hardware workaround */ |
4768 | if (sc->sc_type == WM_T_PCH_SPT) |
4769 | wm_flush_desc_rings(sc); |
4770 | |
4771 | /* Reset the chip to a known state. */ |
4772 | wm_reset(sc); |
4773 | |
4774 | /* AMT based hardware can now take control from firmware */ |
4775 | if ((sc->sc_flags & WM_F_HAS_AMT) != 0) |
4776 | wm_get_hw_control(sc); |
4777 | |
4778 | /* Init hardware bits */ |
4779 | wm_initialize_hardware_bits(sc); |
4780 | |
4781 | /* Reset the PHY. */ |
4782 | if (sc->sc_flags & WM_F_HAS_MII) |
4783 | wm_gmii_reset(sc); |
4784 | |
4785 | /* Calculate (E)ITR value */ |
4786 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4787 | sc->sc_itr = 450; /* For EITR */ |
4788 | } else if (sc->sc_type >= WM_T_82543) { |
4789 | /* |
4790 | * Set up the interrupt throttling register (units of 256ns) |
4791 | * Note that a footnote in Intel's documentation says this |
4792 | * ticker runs at 1/4 the rate when the chip is in 100Mbit |
		 * or 10Mbit mode.  Empirically, the same appears to hold
		 * for the 1024ns units of the other interrupt-related
		 * timer registers -- so, really, we ought to divide this
		 * value by 4 when the link speed is low.
4797 | * |
4798 | * XXX implement this division at link speed change! |
4799 | */ |
4800 | |
4801 | /* |
4802 | * For N interrupts/sec, set this value to: |
4803 | * 1000000000 / (N * 256). Note that we set the |
4804 | * absolute and packet timer values to this value |
4805 | * divided by 4 to get "simple timer" behavior. |
4806 | */ |
4807 | |
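		/*
		 * With sc_itr = 1500, that works out to roughly
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
		 */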
4808 | sc->sc_itr = 1500; /* 2604 ints/sec */ |
4809 | } |
4810 | |
4811 | error = wm_init_txrx_queues(sc); |
4812 | if (error) |
4813 | goto out; |
4814 | |
4815 | /* |
4816 | * Clear out the VLAN table -- we don't use it (yet). |
4817 | */ |
4818 | CSR_WRITE(sc, WMREG_VET, 0); |
4819 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) |
4820 | trynum = 10; /* Due to hw errata */ |
4821 | else |
4822 | trynum = 1; |
4823 | for (i = 0; i < WM_VLAN_TABSIZE; i++) |
4824 | for (j = 0; j < trynum; j++) |
4825 | CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); |
4826 | |
4827 | /* |
4828 | * Set up flow-control parameters. |
4829 | * |
4830 | * XXX Values could probably stand some tuning. |
4831 | */ |
4832 | if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) |
4833 | && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) |
4834 | && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT) |
4835 | && (sc->sc_type != WM_T_PCH_SPT)) { |
4836 | CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); |
4837 | CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); |
4838 | CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); |
4839 | } |
4840 | |
4841 | sc->sc_fcrtl = FCRTL_DFLT; |
4842 | if (sc->sc_type < WM_T_82543) { |
4843 | CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); |
4844 | CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); |
4845 | } else { |
4846 | CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); |
4847 | CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); |
4848 | } |
4849 | |
4850 | if (sc->sc_type == WM_T_80003) |
4851 | CSR_WRITE(sc, WMREG_FCTTV, 0xffff); |
4852 | else |
4853 | CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); |
4854 | |
4855 | /* Writes the control register. */ |
4856 | wm_set_vlan(sc); |
4857 | |
4858 | if (sc->sc_flags & WM_F_HAS_MII) { |
4859 | int val; |
4860 | |
4861 | switch (sc->sc_type) { |
4862 | case WM_T_80003: |
4863 | case WM_T_ICH8: |
4864 | case WM_T_ICH9: |
4865 | case WM_T_ICH10: |
4866 | case WM_T_PCH: |
4867 | case WM_T_PCH2: |
4868 | case WM_T_PCH_LPT: |
4869 | case WM_T_PCH_SPT: |
4870 | /* |
4871 | * Set the mac to wait the maximum time between each |
4872 | * iteration and increase the max iterations when |
4873 | * polling the phy; this fixes erroneous timeouts at |
4874 | * 10Mbps. |
4875 | */ |
4876 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, |
4877 | 0xFFFF); |
4878 | val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM); |
4879 | val |= 0x3F; |
4880 | wm_kmrn_writereg(sc, |
4881 | KUMCTRLSTA_OFFSET_INB_PARAM, val); |
4882 | break; |
4883 | default: |
4884 | break; |
4885 | } |
4886 | |
4887 | if (sc->sc_type == WM_T_80003) { |
4888 | val = CSR_READ(sc, WMREG_CTRL_EXT); |
4889 | val &= ~CTRL_EXT_LINK_MODE_MASK; |
4890 | CSR_WRITE(sc, WMREG_CTRL_EXT, val); |
4891 | |
			/* Bypass RX and TX FIFOs */
4893 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, |
4894 | KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
4895 | | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); |
4896 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, |
4897 | KUMCTRLSTA_INB_CTRL_DIS_PADDING | |
4898 | KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); |
4899 | } |
4900 | } |
4901 | #if 0 |
4902 | CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); |
4903 | #endif |
4904 | |
4905 | /* Set up checksum offload parameters. */ |
4906 | reg = CSR_READ(sc, WMREG_RXCSUM); |
4907 | reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); |
4908 | if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) |
4909 | reg |= RXCSUM_IPOFL; |
4910 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) |
4911 | reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; |
4912 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) |
4913 | reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; |
4914 | CSR_WRITE(sc, WMREG_RXCSUM, reg); |
4915 | |
4916 | /* Set up MSI-X */ |
4917 | if (sc->sc_nintrs > 1) { |
4918 | uint32_t ivar; |
4919 | struct wm_queue *wmq; |
4920 | int qid, qintr_idx; |
4921 | |
4922 | if (sc->sc_type == WM_T_82575) { |
4923 | /* Interrupt control */ |
4924 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
4925 | reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR; |
4926 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4927 | |
4928 | /* TX and RX */ |
4929 | for (i = 0; i < sc->sc_nqueues; i++) { |
4930 | wmq = &sc->sc_queue[i]; |
4931 | CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx), |
4932 | EITR_TX_QUEUE(wmq->wmq_id) |
4933 | | EITR_RX_QUEUE(wmq->wmq_id)); |
4934 | } |
4935 | /* Link status */ |
4936 | CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx), |
4937 | EITR_OTHER); |
4938 | } else if (sc->sc_type == WM_T_82574) { |
4939 | /* Interrupt control */ |
4940 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
4941 | reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME; |
4942 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4943 | |
4944 | ivar = 0; |
4945 | /* TX and RX */ |
4946 | for (i = 0; i < sc->sc_nqueues; i++) { |
4947 | wmq = &sc->sc_queue[i]; |
4948 | qid = wmq->wmq_id; |
4949 | qintr_idx = wmq->wmq_intr_idx; |
4950 | |
4951 | ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), |
4952 | IVAR_TX_MASK_Q_82574(qid)); |
4953 | ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), |
4954 | IVAR_RX_MASK_Q_82574(qid)); |
4955 | } |
4956 | /* Link status */ |
4957 | ivar |= __SHIFTIN((IVAR_VALID_82574 |
4958 | | sc->sc_link_intr_idx), IVAR_OTHER_MASK); |
4959 | CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB); |
4960 | } else { |
4961 | /* Interrupt control */ |
4962 | CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX |
4963 | | GPIE_EIAME | GPIE_PBA); |
4964 | |
4965 | switch (sc->sc_type) { |
4966 | case WM_T_82580: |
4967 | case WM_T_I350: |
4968 | case WM_T_I354: |
4969 | case WM_T_I210: |
4970 | case WM_T_I211: |
4971 | /* TX and RX */ |
4972 | for (i = 0; i < sc->sc_nqueues; i++) { |
4973 | wmq = &sc->sc_queue[i]; |
4974 | qid = wmq->wmq_id; |
4975 | qintr_idx = wmq->wmq_intr_idx; |
4976 | |
4977 | ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); |
4978 | ivar &= ~IVAR_TX_MASK_Q(qid); |
4979 | ivar |= __SHIFTIN((qintr_idx |
4980 | | IVAR_VALID), |
4981 | IVAR_TX_MASK_Q(qid)); |
4982 | ivar &= ~IVAR_RX_MASK_Q(qid); |
4983 | ivar |= __SHIFTIN((qintr_idx |
4984 | | IVAR_VALID), |
4985 | IVAR_RX_MASK_Q(qid)); |
4986 | CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); |
4987 | } |
4988 | break; |
4989 | case WM_T_82576: |
4990 | /* TX and RX */ |
4991 | for (i = 0; i < sc->sc_nqueues; i++) { |
4992 | wmq = &sc->sc_queue[i]; |
4993 | qid = wmq->wmq_id; |
4994 | qintr_idx = wmq->wmq_intr_idx; |
4995 | |
4996 | ivar = CSR_READ(sc, |
4997 | WMREG_IVAR_Q_82576(qid)); |
4998 | ivar &= ~IVAR_TX_MASK_Q_82576(qid); |
4999 | ivar |= __SHIFTIN((qintr_idx |
5000 | | IVAR_VALID), |
5001 | IVAR_TX_MASK_Q_82576(qid)); |
5002 | ivar &= ~IVAR_RX_MASK_Q_82576(qid); |
5003 | ivar |= __SHIFTIN((qintr_idx |
5004 | | IVAR_VALID), |
5005 | IVAR_RX_MASK_Q_82576(qid)); |
5006 | CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), |
5007 | ivar); |
5008 | } |
5009 | break; |
5010 | default: |
5011 | break; |
5012 | } |
5013 | |
5014 | /* Link status */ |
5015 | ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID), |
5016 | IVAR_MISC_OTHER); |
5017 | CSR_WRITE(sc, WMREG_IVAR_MISC, ivar); |
5018 | } |
5019 | |
5020 | if (sc->sc_nqueues > 1) { |
5021 | wm_init_rss(sc); |
5022 | |
5023 | /* |
5024 | ** NOTE: Receive Full-Packet Checksum Offload |
5025 | ** is mutually exclusive with Multiqueue. However |
5026 | ** this is not the same as TCP/IP checksums which |
5027 | ** still work. |
5028 | */ |
5029 | reg = CSR_READ(sc, WMREG_RXCSUM); |
5030 | reg |= RXCSUM_PCSD; |
5031 | CSR_WRITE(sc, WMREG_RXCSUM, reg); |
5032 | } |
5033 | } |
5034 | |
5035 | /* Set up the interrupt registers. */ |
5036 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
5037 | sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | |
5038 | ICR_RXO | ICR_RXT0; |
5039 | if (sc->sc_nintrs > 1) { |
5040 | uint32_t mask; |
5041 | struct wm_queue *wmq; |
5042 | |
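		/*
		 * EIAC selects which extended interrupt causes are
		 * auto-cleared on read and EIAM which are auto-masked;
		 * programming one bit per MSI-X vector lets queue
		 * interrupts be acknowledged without extra register
		 * writes.
		 */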
5043 | switch (sc->sc_type) { |
5044 | case WM_T_82574: |
5045 | CSR_WRITE(sc, WMREG_EIAC_82574, |
5046 | WMREG_EIAC_82574_MSIX_MASK); |
5047 | sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK; |
5048 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); |
5049 | break; |
5050 | default: |
5051 | if (sc->sc_type == WM_T_82575) { |
5052 | mask = 0; |
5053 | for (i = 0; i < sc->sc_nqueues; i++) { |
5054 | wmq = &sc->sc_queue[i]; |
5055 | mask |= EITR_TX_QUEUE(wmq->wmq_id); |
5056 | mask |= EITR_RX_QUEUE(wmq->wmq_id); |
5057 | } |
5058 | mask |= EITR_OTHER; |
5059 | } else { |
5060 | mask = 0; |
5061 | for (i = 0; i < sc->sc_nqueues; i++) { |
5062 | wmq = &sc->sc_queue[i]; |
5063 | mask |= 1 << wmq->wmq_intr_idx; |
5064 | } |
5065 | mask |= 1 << sc->sc_link_intr_idx; |
5066 | } |
5067 | CSR_WRITE(sc, WMREG_EIAC, mask); |
5068 | CSR_WRITE(sc, WMREG_EIAM, mask); |
5069 | CSR_WRITE(sc, WMREG_EIMS, mask); |
5070 | CSR_WRITE(sc, WMREG_IMS, ICR_LSC); |
5071 | break; |
5072 | } |
5073 | } else |
5074 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); |
5075 | |
5076 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
5077 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
5078 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) |
5079 | || (sc->sc_type == WM_T_PCH_SPT)) { |
5080 | reg = CSR_READ(sc, WMREG_KABGTXD); |
5081 | reg |= KABGTXD_BGSQLBIAS; |
5082 | CSR_WRITE(sc, WMREG_KABGTXD, reg); |
5083 | } |
5084 | |
5085 | /* Set up the inter-packet gap. */ |
5086 | CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); |
5087 | |
5088 | if (sc->sc_type >= WM_T_82543) { |
5089 | /* |
		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
		 * use the multiqueue function with MSI-X.
5092 | */ |
5093 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
5094 | int qidx; |
5095 | for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { |
5096 | struct wm_queue *wmq = &sc->sc_queue[qidx]; |
5097 | CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), |
5098 | sc->sc_itr); |
5099 | } |
5100 | /* |
			 * Link interrupts occur much less frequently
			 * than TX and RX interrupts.  So, we don't
5103 | * tune EINTR(WM_MSIX_LINKINTR_IDX) value like |
5104 | * FreeBSD's if_igb. |
5105 | */ |
5106 | } else |
5107 | CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); |
5108 | } |
5109 | |
5110 | /* Set the VLAN ethernetype. */ |
5111 | CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); |
5112 | |
5113 | /* |
5114 | * Set up the transmit control register; we start out with |
	 * a collision distance suitable for FDX, but update it when
5116 | * we resolve the media type. |
5117 | */ |
5118 | sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC |
5119 | | TCTL_CT(TX_COLLISION_THRESHOLD) |
5120 | | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
5121 | if (sc->sc_type >= WM_T_82571) |
5122 | sc->sc_tctl |= TCTL_MULR; |
5123 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
5124 | |
5125 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
		/* Write TDT after TCTL.EN is set.  See the documentation. */
5127 | CSR_WRITE(sc, WMREG_TDT(0), 0); |
5128 | } |
5129 | |
5130 | if (sc->sc_type == WM_T_80003) { |
5131 | reg = CSR_READ(sc, WMREG_TCTL_EXT); |
5132 | reg &= ~TCTL_EXT_GCEX_MASK; |
5133 | reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; |
5134 | CSR_WRITE(sc, WMREG_TCTL_EXT, reg); |
5135 | } |
5136 | |
5137 | /* Set the media. */ |
5138 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) |
5139 | goto out; |
5140 | |
5141 | /* Configure for OS presence */ |
5142 | wm_init_manageability(sc); |
5143 | |
5144 | /* |
5145 | * Set up the receive control register; we actually program |
5146 | * the register when we set the receive filter. Use multicast |
5147 | * address offset type 0. |
5148 | * |
5149 | * Only the i82544 has the ability to strip the incoming |
5150 | * CRC, so we don't enable that feature. |
5151 | */ |
5152 | sc->sc_mchash_type = 0; |
5153 | sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF |
5154 | | RCTL_MO(sc->sc_mchash_type); |
5155 | |
5156 | /* |
5157 | * The I350 has a bug where it always strips the CRC whether |
	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
5159 | */ |
5160 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
5161 | || (sc->sc_type == WM_T_I210)) |
5162 | sc->sc_rctl |= RCTL_SECRC; |
5163 | |
5164 | if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) |
5165 | && (ifp->if_mtu > ETHERMTU)) { |
5166 | sc->sc_rctl |= RCTL_LPE; |
5167 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
5168 | CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); |
5169 | } |
5170 | |
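	/*
	 * Select a receive buffer size to match MCLBYTES.  On the
	 * i82543 and later, RCTL_BSEX extends the buffer-size encoding
	 * to cover the 4k/8k/16k sizes used below.
	 */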
5171 | if (MCLBYTES == 2048) { |
5172 | sc->sc_rctl |= RCTL_2k; |
5173 | } else { |
5174 | if (sc->sc_type >= WM_T_82543) { |
5175 | switch (MCLBYTES) { |
5176 | case 4096: |
5177 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; |
5178 | break; |
5179 | case 8192: |
5180 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; |
5181 | break; |
5182 | case 16384: |
5183 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; |
5184 | break; |
5185 | default: |
5186 | panic("wm_init: MCLBYTES %d unsupported" , |
5187 | MCLBYTES); |
5188 | break; |
5189 | } |
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
5191 | } |
5192 | |
5193 | /* Set the receive filter. */ |
5194 | wm_set_filter(sc); |
5195 | |
5196 | /* Enable ECC */ |
5197 | switch (sc->sc_type) { |
5198 | case WM_T_82571: |
5199 | reg = CSR_READ(sc, WMREG_PBA_ECC); |
5200 | reg |= PBA_ECC_CORR_EN; |
5201 | CSR_WRITE(sc, WMREG_PBA_ECC, reg); |
5202 | break; |
5203 | case WM_T_PCH_LPT: |
5204 | case WM_T_PCH_SPT: |
5205 | reg = CSR_READ(sc, WMREG_PBECCSTS); |
5206 | reg |= PBECCSTS_UNCORR_ECC_ENABLE; |
5207 | CSR_WRITE(sc, WMREG_PBECCSTS, reg); |
5208 | |
5209 | sc->sc_ctrl |= CTRL_MEHE; |
5210 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
5211 | break; |
5212 | default: |
5213 | break; |
5214 | } |
5215 | |
	/* On 82575 and later, set RDT only if RX is enabled */
5217 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
5218 | int qidx; |
5219 | for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { |
5220 | struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq; |
5221 | for (i = 0; i < WM_NRXDESC; i++) { |
5222 | mutex_enter(rxq->rxq_lock); |
5223 | wm_init_rxdesc(rxq, i); |
				mutex_exit(rxq->rxq_lock);
			}
5227 | } |
5228 | } |
5229 | |
5230 | wm_turnon(sc); |
5231 | |
5232 | /* Start the one second link check clock. */ |
5233 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); |
5234 | |
5235 | /* ...all done! */ |
5236 | ifp->if_flags |= IFF_RUNNING; |
5237 | ifp->if_flags &= ~IFF_OACTIVE; |
5238 | |
5239 | out: |
5240 | sc->sc_if_flags = ifp->if_flags; |
5241 | if (error) |
5242 | log(LOG_ERR, "%s: interface not running\n" , |
5243 | device_xname(sc->sc_dev)); |
5244 | return error; |
5245 | } |
5246 | |
5247 | /* |
5248 | * wm_stop: [ifnet interface function] |
5249 | * |
5250 | * Stop transmission on the interface. |
5251 | */ |
5252 | static void |
5253 | wm_stop(struct ifnet *ifp, int disable) |
5254 | { |
5255 | struct wm_softc *sc = ifp->if_softc; |
5256 | |
5257 | WM_CORE_LOCK(sc); |
5258 | wm_stop_locked(ifp, disable); |
5259 | WM_CORE_UNLOCK(sc); |
5260 | } |
5261 | |
5262 | static void |
5263 | wm_stop_locked(struct ifnet *ifp, int disable) |
5264 | { |
5265 | struct wm_softc *sc = ifp->if_softc; |
5266 | struct wm_txsoft *txs; |
5267 | int i, qidx; |
5268 | |
	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5270 | device_xname(sc->sc_dev), __func__)); |
5271 | KASSERT(WM_CORE_LOCKED(sc)); |
5272 | |
5273 | wm_turnoff(sc); |
5274 | |
5275 | /* Stop the one second clock. */ |
5276 | callout_stop(&sc->sc_tick_ch); |
5277 | |
5278 | /* Stop the 82547 Tx FIFO stall check timer. */ |
5279 | if (sc->sc_type == WM_T_82547) |
5280 | callout_stop(&sc->sc_txfifo_ch); |
5281 | |
5282 | if (sc->sc_flags & WM_F_HAS_MII) { |
5283 | /* Down the MII. */ |
5284 | mii_down(&sc->sc_mii); |
5285 | } else { |
5286 | #if 0 |
5287 | /* Should we clear PHY's status properly? */ |
5288 | wm_reset(sc); |
5289 | #endif |
5290 | } |
5291 | |
5292 | /* Stop the transmit and receive processes. */ |
5293 | CSR_WRITE(sc, WMREG_TCTL, 0); |
5294 | CSR_WRITE(sc, WMREG_RCTL, 0); |
5295 | sc->sc_rctl &= ~RCTL_EN; |
5296 | |
5297 | /* |
5298 | * Clear the interrupt mask to ensure the device cannot assert its |
5299 | * interrupt line. |
5300 | * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to |
5301 | * service any currently pending or shared interrupt. |
5302 | */ |
5303 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
5304 | sc->sc_icr = 0; |
5305 | if (sc->sc_nintrs > 1) { |
5306 | if (sc->sc_type != WM_T_82574) { |
5307 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); |
5308 | CSR_WRITE(sc, WMREG_EIAC, 0); |
5309 | } else |
5310 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); |
5311 | } |
5312 | |
5313 | /* Release any queued transmit buffers. */ |
5314 | for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { |
5315 | struct wm_queue *wmq = &sc->sc_queue[qidx]; |
5316 | struct wm_txqueue *txq = &wmq->wmq_txq; |
5317 | mutex_enter(txq->txq_lock); |
5318 | for (i = 0; i < WM_TXQUEUELEN(txq); i++) { |
5319 | txs = &txq->txq_soft[i]; |
5320 | if (txs->txs_mbuf != NULL) { |
				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5322 | m_freem(txs->txs_mbuf); |
5323 | txs->txs_mbuf = NULL; |
5324 | } |
5325 | } |
5326 | mutex_exit(txq->txq_lock); |
5327 | } |
5328 | |
5329 | /* Mark the interface as down and cancel the watchdog timer. */ |
5330 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
5331 | ifp->if_timer = 0; |
5332 | |
5333 | if (disable) { |
5334 | for (i = 0; i < sc->sc_nqueues; i++) { |
5335 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
5336 | mutex_enter(rxq->rxq_lock); |
5337 | wm_rxdrain(rxq); |
5338 | mutex_exit(rxq->rxq_lock); |
5339 | } |
5340 | } |
5341 | |
5342 | #if 0 /* notyet */ |
5343 | if (sc->sc_type >= WM_T_82544) |
5344 | CSR_WRITE(sc, WMREG_WUC, 0); |
5345 | #endif |
5346 | } |
5347 | |
5348 | static void |
5349 | wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) |
5350 | { |
5351 | struct mbuf *m; |
5352 | int i; |
5353 | |
5354 | log(LOG_DEBUG, "%s: mbuf chain:\n" , device_xname(sc->sc_dev)); |
5355 | for (m = m0, i = 0; m != NULL; m = m->m_next, i++) |
5356 | log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " |
5357 | "m_flags = 0x%08x\n" , device_xname(sc->sc_dev), |
5358 | m->m_data, m->m_len, m->m_flags); |
5359 | log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n" , device_xname(sc->sc_dev), |
5360 | i, i == 1 ? "" : "s" ); |
5361 | } |
5362 | |
5363 | /* |
5364 | * wm_82547_txfifo_stall: |
5365 | * |
5366 | * Callout used to wait for the 82547 Tx FIFO to drain, |
5367 | * reset the FIFO pointers, and restart packet transmission. |
5368 | */ |
5369 | static void |
5370 | wm_82547_txfifo_stall(void *arg) |
5371 | { |
5372 | struct wm_softc *sc = arg; |
5373 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
5374 | |
5375 | mutex_enter(txq->txq_lock); |
5376 | |
5377 | if (txq->txq_stopping) |
5378 | goto out; |
5379 | |
5380 | if (txq->txq_fifo_stall) { |
5381 | if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) && |
5382 | CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && |
5383 | CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { |
5384 | /* |
5385 | * Packets have drained. Stop transmitter, reset |
5386 | * FIFO pointers, restart transmitter, and kick |
5387 | * the packet queue. |
5388 | */ |
5389 | uint32_t tctl = CSR_READ(sc, WMREG_TCTL); |
5390 | CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); |
5391 | CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr); |
5392 | CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr); |
5393 | CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr); |
5394 | CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr); |
5395 | CSR_WRITE(sc, WMREG_TCTL, tctl); |
5396 | CSR_WRITE_FLUSH(sc); |
5397 | |
5398 | txq->txq_fifo_head = 0; |
5399 | txq->txq_fifo_stall = 0; |
5400 | wm_start_locked(&sc->sc_ethercom.ec_if); |
5401 | } else { |
5402 | /* |
5403 | * Still waiting for packets to drain; try again in |
5404 | * another tick. |
5405 | */ |
5406 | callout_schedule(&sc->sc_txfifo_ch, 1); |
5407 | } |
5408 | } |
5409 | |
5410 | out: |
5411 | mutex_exit(txq->txq_lock); |
5412 | } |
5413 | |
5414 | /* |
5415 | * wm_82547_txfifo_bugchk: |
5416 | * |
5417 | * Check for bug condition in the 82547 Tx FIFO. We need to |
5418 | * prevent enqueueing a packet that would wrap around the end |
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
5420 | * |
5421 | * We do this by checking the amount of space before the end |
5422 | * of the Tx FIFO buffer. If the packet will not fit, we "stall" |
5423 | * the Tx FIFO, wait for all remaining packets to drain, reset |
5424 | * the internal FIFO pointers to the beginning, and restart |
5425 | * transmission on the interface. |
5426 | */ |
5427 | #define WM_FIFO_HDR 0x10 |
5428 | #define WM_82547_PAD_LEN 0x3e0 |
5429 | static int |
5430 | wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) |
5431 | { |
5432 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
5433 | int space = txq->txq_fifo_size - txq->txq_fifo_head; |
5434 | int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); |
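
	/*
	 * "space" is the room left before the end of the FIFO; "len" is
	 * the packet plus its FIFO header, rounded up to the 16-byte
	 * (WM_FIFO_HDR) granularity the hardware uses.
	 */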
5435 | |
5436 | /* Just return if already stalled. */ |
5437 | if (txq->txq_fifo_stall) |
5438 | return 1; |
5439 | |
5440 | if (sc->sc_mii.mii_media_active & IFM_FDX) { |
5441 | /* Stall only occurs in half-duplex mode. */ |
5442 | goto send_packet; |
5443 | } |
5444 | |
5445 | if (len >= WM_82547_PAD_LEN + space) { |
5446 | txq->txq_fifo_stall = 1; |
5447 | callout_schedule(&sc->sc_txfifo_ch, 1); |
5448 | return 1; |
5449 | } |
5450 | |
5451 | send_packet: |
5452 | txq->txq_fifo_head += len; |
5453 | if (txq->txq_fifo_head >= txq->txq_fifo_size) |
5454 | txq->txq_fifo_head -= txq->txq_fifo_size; |
5455 | |
5456 | return 0; |
5457 | } |
5458 | |
5459 | static int |
5460 | wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) |
5461 | { |
5462 | int error; |
5463 | |
5464 | /* |
5465 | * Allocate the control data structures, and create and load the |
5466 | * DMA map for it. |
5467 | * |
5468 | * NOTE: All Tx descriptors must be in the same 4G segment of |
5469 | * memory. So must Rx descriptors. We simplify by allocating |
5470 | * both sets within the same 4G segment. |
5471 | */ |
5472 | if (sc->sc_type < WM_T_82544) |
5473 | WM_NTXDESC(txq) = WM_NTXDESC_82542; |
5474 | else |
5475 | WM_NTXDESC(txq) = WM_NTXDESC_82544; |
5476 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
5477 | txq->txq_descsize = sizeof(nq_txdesc_t); |
5478 | else |
5479 | txq->txq_descsize = sizeof(wiseman_txdesc_t); |
5480 | |
5481 | if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq), |
5482 | PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, |
5483 | 1, &txq->txq_desc_rseg, 0)) != 0) { |
5484 | aprint_error_dev(sc->sc_dev, |
5485 | "unable to allocate TX control data, error = %d\n" , |
5486 | error); |
5487 | goto fail_0; |
5488 | } |
5489 | |
5490 | if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg, |
5491 | txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq), |
5492 | (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) { |
5493 | aprint_error_dev(sc->sc_dev, |
5494 | "unable to map TX control data, error = %d\n" , error); |
5495 | goto fail_1; |
5496 | } |
5497 | |
5498 | if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1, |
5499 | WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) { |
5500 | aprint_error_dev(sc->sc_dev, |
5501 | "unable to create TX control data DMA map, error = %d\n" , |
5502 | error); |
5503 | goto fail_2; |
5504 | } |
5505 | |
5506 | if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap, |
5507 | txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) { |
5508 | aprint_error_dev(sc->sc_dev, |
5509 | "unable to load TX control data DMA map, error = %d\n" , |
5510 | error); |
5511 | goto fail_3; |
5512 | } |
5513 | |
5514 | return 0; |
5515 | |
5516 | fail_3: |
5517 | bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); |
5518 | fail_2: |
5519 | bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, |
5520 | WM_TXDESCS_SIZE(txq)); |
5521 | fail_1: |
5522 | bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); |
5523 | fail_0: |
5524 | return error; |
5525 | } |
5526 | |
5527 | static void |
5528 | wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) |
5529 | { |
5530 | |
5531 | bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap); |
5532 | bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); |
5533 | bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, |
5534 | WM_TXDESCS_SIZE(txq)); |
5535 | bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); |
5536 | } |
5537 | |
5538 | static int |
5539 | wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) |
5540 | { |
5541 | int error; |
5542 | |
5543 | /* |
5544 | * Allocate the control data structures, and create and load the |
5545 | * DMA map for it. |
5546 | * |
5547 | * NOTE: All Tx descriptors must be in the same 4G segment of |
5548 | * memory. So must Rx descriptors. We simplify by allocating |
5549 | * both sets within the same 4G segment. |
5550 | */ |
5551 | rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC; |
5552 | if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, |
5553 | PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, |
5554 | 1, &rxq->rxq_desc_rseg, 0)) != 0) { |
5555 | aprint_error_dev(sc->sc_dev, |
5556 | "unable to allocate RX control data, error = %d\n" , |
5557 | error); |
5558 | goto fail_0; |
5559 | } |
5560 | |
5561 | if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg, |
5562 | rxq->rxq_desc_rseg, rxq->rxq_desc_size, |
5563 | (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) { |
5564 | aprint_error_dev(sc->sc_dev, |
5565 | "unable to map RX control data, error = %d\n" , error); |
5566 | goto fail_1; |
5567 | } |
5568 | |
5569 | if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1, |
5570 | rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) { |
5571 | aprint_error_dev(sc->sc_dev, |
5572 | "unable to create RX control data DMA map, error = %d\n" , |
5573 | error); |
5574 | goto fail_2; |
5575 | } |
5576 | |
5577 | if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap, |
5578 | rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) { |
5579 | aprint_error_dev(sc->sc_dev, |
5580 | "unable to load RX control data DMA map, error = %d\n" , |
5581 | error); |
5582 | goto fail_3; |
5583 | } |
5584 | |
5585 | return 0; |
5586 | |
5587 | fail_3: |
5588 | bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); |
5589 | fail_2: |
5590 | bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs, |
5591 | rxq->rxq_desc_size); |
5592 | fail_1: |
5593 | bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); |
5594 | fail_0: |
5595 | return error; |
5596 | } |
5597 | |
5598 | static void |
5599 | wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) |
5600 | { |
5601 | |
5602 | bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap); |
5603 | bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); |
5604 | bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs, |
5605 | rxq->rxq_desc_size); |
5606 | bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); |
5607 | } |
5608 | |
5609 | |
5610 | static int |
5611 | wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) |
5612 | { |
5613 | int i, error; |
5614 | |
5615 | /* Create the transmit buffer DMA maps. */ |
5616 | WM_TXQUEUELEN(txq) = |
5617 | (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? |
5618 | WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; |
5619 | for (i = 0; i < WM_TXQUEUELEN(txq); i++) { |
5620 | if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, |
5621 | WM_NTXSEGS, WTX_MAX_LEN, 0, 0, |
5622 | &txq->txq_soft[i].txs_dmamap)) != 0) { |
5623 | aprint_error_dev(sc->sc_dev, |
5624 | "unable to create Tx DMA map %d, error = %d\n" , |
5625 | i, error); |
5626 | goto fail; |
5627 | } |
5628 | } |
5629 | |
5630 | return 0; |
5631 | |
5632 | fail: |
5633 | for (i = 0; i < WM_TXQUEUELEN(txq); i++) { |
5634 | if (txq->txq_soft[i].txs_dmamap != NULL) |
5635 | bus_dmamap_destroy(sc->sc_dmat, |
5636 | txq->txq_soft[i].txs_dmamap); |
5637 | } |
5638 | return error; |
5639 | } |
5640 | |
5641 | static void |
5642 | wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) |
5643 | { |
5644 | int i; |
5645 | |
5646 | for (i = 0; i < WM_TXQUEUELEN(txq); i++) { |
5647 | if (txq->txq_soft[i].txs_dmamap != NULL) |
5648 | bus_dmamap_destroy(sc->sc_dmat, |
5649 | txq->txq_soft[i].txs_dmamap); |
5650 | } |
5651 | } |
5652 | |
5653 | static int |
5654 | wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) |
5655 | { |
5656 | int i, error; |
5657 | |
5658 | /* Create the receive buffer DMA maps. */ |
5659 | for (i = 0; i < WM_NRXDESC; i++) { |
5660 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
5661 | MCLBYTES, 0, 0, |
5662 | &rxq->rxq_soft[i].rxs_dmamap)) != 0) { |
5663 | aprint_error_dev(sc->sc_dev, |
5664 | "unable to create Rx DMA map %d error = %d\n" , |
5665 | i, error); |
5666 | goto fail; |
5667 | } |
5668 | rxq->rxq_soft[i].rxs_mbuf = NULL; |
5669 | } |
5670 | |
5671 | return 0; |
5672 | |
5673 | fail: |
5674 | for (i = 0; i < WM_NRXDESC; i++) { |
5675 | if (rxq->rxq_soft[i].rxs_dmamap != NULL) |
5676 | bus_dmamap_destroy(sc->sc_dmat, |
5677 | rxq->rxq_soft[i].rxs_dmamap); |
5678 | } |
5679 | return error; |
5680 | } |
5681 | |
5682 | static void |
5683 | wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) |
5684 | { |
5685 | int i; |
5686 | |
5687 | for (i = 0; i < WM_NRXDESC; i++) { |
5688 | if (rxq->rxq_soft[i].rxs_dmamap != NULL) |
5689 | bus_dmamap_destroy(sc->sc_dmat, |
5690 | rxq->rxq_soft[i].rxs_dmamap); |
5691 | } |
5692 | } |
5693 | |
5694 | /* |
 * wm_alloc_txrx_queues:
5696 | * Allocate {tx,rx}descs and {tx,rx} buffers |
5697 | */ |
5698 | static int |
5699 | wm_alloc_txrx_queues(struct wm_softc *sc) |
5700 | { |
5701 | int i, error, tx_done, rx_done; |
5702 | |
5703 | sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues, |
5704 | KM_SLEEP); |
5705 | if (sc->sc_queue == NULL) { |
5706 | aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n" ); |
5707 | error = ENOMEM; |
5708 | goto fail_0; |
5709 | } |
5710 | |
5711 | /* |
5712 | * For transmission |
5713 | */ |
5714 | error = 0; |
5715 | tx_done = 0; |
5716 | for (i = 0; i < sc->sc_nqueues; i++) { |
5717 | #ifdef WM_EVENT_COUNTERS |
5718 | int j; |
5719 | const char *xname; |
5720 | #endif |
5721 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
5722 | txq->txq_sc = sc; |
5723 | txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
5724 | |
5725 | error = wm_alloc_tx_descs(sc, txq); |
5726 | if (error) |
5727 | break; |
5728 | error = wm_alloc_tx_buffer(sc, txq); |
5729 | if (error) { |
5730 | wm_free_tx_descs(sc, txq); |
5731 | break; |
5732 | } |
5733 | txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP); |
5734 | if (txq->txq_interq == NULL) { |
5735 | wm_free_tx_descs(sc, txq); |
5736 | wm_free_tx_buffer(sc, txq); |
5737 | error = ENOMEM; |
5738 | break; |
5739 | } |
5740 | |
5741 | #ifdef WM_EVENT_COUNTERS |
5742 | xname = device_xname(sc->sc_dev); |
5743 | |
5744 | WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname); |
5745 | WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname); |
5746 | WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname); |
5747 | WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname); |
5748 | WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname); |
5749 | |
5750 | WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname); |
5751 | WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname); |
5752 | WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname); |
5753 | WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname); |
5754 | WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname); |
5755 | WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname); |
5756 | |
5757 | for (j = 0; j < WM_NTXSEGS; j++) { |
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
5760 | evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC, |
5761 | NULL, xname, txq->txq_txseg_evcnt_names[j]); |
5762 | } |
5763 | |
5764 | WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname); |
5765 | |
5766 | WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname); |
5767 | #endif /* WM_EVENT_COUNTERS */ |
5768 | |
5769 | tx_done++; |
5770 | } |
5771 | if (error) |
5772 | goto fail_1; |
5773 | |
5774 | /* |
	 * For receive
5776 | */ |
5777 | error = 0; |
5778 | rx_done = 0; |
5779 | for (i = 0; i < sc->sc_nqueues; i++) { |
5780 | #ifdef WM_EVENT_COUNTERS |
5781 | const char *xname; |
5782 | #endif |
5783 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
5784 | rxq->rxq_sc = sc; |
5785 | rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
5786 | |
5787 | error = wm_alloc_rx_descs(sc, rxq); |
5788 | if (error) |
5789 | break; |
5790 | |
5791 | error = wm_alloc_rx_buffer(sc, rxq); |
5792 | if (error) { |
5793 | wm_free_rx_descs(sc, rxq); |
5794 | break; |
5795 | } |
5796 | |
5797 | #ifdef WM_EVENT_COUNTERS |
5798 | xname = device_xname(sc->sc_dev); |
5799 | |
5800 | WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname); |
5801 | |
5802 | WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname); |
5803 | WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname); |
5804 | #endif /* WM_EVENT_COUNTERS */ |
5805 | |
5806 | rx_done++; |
5807 | } |
5808 | if (error) |
5809 | goto fail_2; |
5810 | |
5811 | return 0; |
5812 | |
5813 | fail_2: |
5814 | for (i = 0; i < rx_done; i++) { |
5815 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
5816 | wm_free_rx_buffer(sc, rxq); |
5817 | wm_free_rx_descs(sc, rxq); |
5818 | if (rxq->rxq_lock) |
5819 | mutex_obj_free(rxq->rxq_lock); |
5820 | } |
5821 | fail_1: |
5822 | for (i = 0; i < tx_done; i++) { |
5823 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
5824 | pcq_destroy(txq->txq_interq); |
5825 | wm_free_tx_buffer(sc, txq); |
5826 | wm_free_tx_descs(sc, txq); |
5827 | if (txq->txq_lock) |
5828 | mutex_obj_free(txq->txq_lock); |
5829 | } |
5830 | |
5831 | kmem_free(sc->sc_queue, |
5832 | sizeof(struct wm_queue) * sc->sc_nqueues); |
5833 | fail_0: |
5834 | return error; |
5835 | } |
5836 | |
5837 | /* |
 * wm_free_txrx_queues:
5839 | * Free {tx,rx}descs and {tx,rx} buffers |
5840 | */ |
5841 | static void |
5842 | wm_free_txrx_queues(struct wm_softc *sc) |
5843 | { |
5844 | int i; |
5845 | |
5846 | for (i = 0; i < sc->sc_nqueues; i++) { |
5847 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
5848 | wm_free_rx_buffer(sc, rxq); |
5849 | wm_free_rx_descs(sc, rxq); |
5850 | if (rxq->rxq_lock) |
5851 | mutex_obj_free(rxq->rxq_lock); |
5852 | } |
5853 | |
5854 | for (i = 0; i < sc->sc_nqueues; i++) { |
5855 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
5856 | wm_free_tx_buffer(sc, txq); |
5857 | wm_free_tx_descs(sc, txq); |
5858 | if (txq->txq_lock) |
5859 | mutex_obj_free(txq->txq_lock); |
5860 | } |
5861 | |
5862 | kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues); |
5863 | } |
5864 | |
5865 | static void |
5866 | wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq) |
5867 | { |
5868 | |
5869 | KASSERT(mutex_owned(txq->txq_lock)); |
5870 | |
5871 | /* Initialize the transmit descriptor ring. */ |
5872 | memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq)); |
5873 | wm_cdtxsync(txq, 0, WM_NTXDESC(txq), |
5874 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
5875 | txq->txq_free = WM_NTXDESC(txq); |
5876 | txq->txq_next = 0; |
5877 | } |
5878 | |
5879 | static void |
5880 | wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq, |
5881 | struct wm_txqueue *txq) |
5882 | { |
5883 | |
	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5885 | device_xname(sc->sc_dev), __func__)); |
5886 | KASSERT(mutex_owned(txq->txq_lock)); |
5887 | |
5888 | if (sc->sc_type < WM_T_82543) { |
5889 | CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0)); |
5890 | CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0)); |
5891 | CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq)); |
5892 | CSR_WRITE(sc, WMREG_OLD_TDH, 0); |
5893 | CSR_WRITE(sc, WMREG_OLD_TDT, 0); |
5894 | CSR_WRITE(sc, WMREG_OLD_TIDV, 128); |
5895 | } else { |
5896 | int qid = wmq->wmq_id; |
5897 | |
5898 | CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0)); |
5899 | CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0)); |
5900 | CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq)); |
5901 | CSR_WRITE(sc, WMREG_TDH(qid), 0); |
5902 | |
5903 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
5904 | /* |
5905 | * Don't write TDT before TCTL.EN is set. |
			 * See the documentation.
5907 | */ |
5908 | CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE |
5909 | | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) |
5910 | | TXDCTL_WTHRESH(0)); |
5911 | else { |
5912 | /* ITR / 4 */ |
5913 | CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4); |
5914 | if (sc->sc_type >= WM_T_82540) { |
				/* Should be the same value as TIDV. */
5916 | CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4); |
5917 | } |
5918 | |
5919 | CSR_WRITE(sc, WMREG_TDT(qid), 0); |
5920 | CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) | |
5921 | TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); |
5922 | } |
5923 | } |
5924 | } |
5925 | |
5926 | static void |
5927 | wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq) |
5928 | { |
5929 | int i; |
5930 | |
5931 | KASSERT(mutex_owned(txq->txq_lock)); |
5932 | |
5933 | /* Initialize the transmit job descriptors. */ |
5934 | for (i = 0; i < WM_TXQUEUELEN(txq); i++) |
5935 | txq->txq_soft[i].txs_mbuf = NULL; |
5936 | txq->txq_sfree = WM_TXQUEUELEN(txq); |
5937 | txq->txq_snext = 0; |
5938 | txq->txq_sdirty = 0; |
5939 | } |
5940 | |
5941 | static void |
5942 | wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq, |
5943 | struct wm_txqueue *txq) |
5944 | { |
5945 | |
5946 | KASSERT(mutex_owned(txq->txq_lock)); |
5947 | |
5948 | /* |
5949 | * Set up some register offsets that are different between |
5950 | * the i82542 and the i82543 and later chips. |
5951 | */ |
5952 | if (sc->sc_type < WM_T_82543) |
5953 | txq->txq_tdt_reg = WMREG_OLD_TDT; |
5954 | else |
5955 | txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id); |
5956 | |
5957 | wm_init_tx_descs(sc, txq); |
5958 | wm_init_tx_regs(sc, wmq, txq); |
5959 | wm_init_tx_buffer(sc, txq); |
5960 | } |
5961 | |
5962 | static void |
5963 | wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq, |
5964 | struct wm_rxqueue *rxq) |
5965 | { |
5966 | |
5967 | KASSERT(mutex_owned(rxq->rxq_lock)); |
5968 | |
5969 | /* |
5970 | * Initialize the receive descriptor and receive job |
5971 | * descriptor rings. |
5972 | */ |
5973 | if (sc->sc_type < WM_T_82543) { |
5974 | CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0)); |
5975 | CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0)); |
5976 | CSR_WRITE(sc, WMREG_OLD_RDLEN0, |
5977 | sizeof(wiseman_rxdesc_t) * WM_NRXDESC); |
5978 | CSR_WRITE(sc, WMREG_OLD_RDH0, 0); |
5979 | CSR_WRITE(sc, WMREG_OLD_RDT0, 0); |
5980 | CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); |
5981 | |
5982 | CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); |
5983 | CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); |
5984 | CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); |
5985 | CSR_WRITE(sc, WMREG_OLD_RDH1, 0); |
5986 | CSR_WRITE(sc, WMREG_OLD_RDT1, 0); |
5987 | CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); |
5988 | } else { |
5989 | int qid = wmq->wmq_id; |
5990 | |
5991 | CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); |
5992 | CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); |
5993 | CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size); |
5994 | |
5995 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
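			/*
			 * SRRCTL expresses the packet buffer size in
			 * 1KB (1 << SRRCTL_BSIZEPKT_SHIFT) units, so
			 * MCLBYTES must be a multiple of that.
			 */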
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "i82575 or higher\n", __func__,
				    MCLBYTES);
5998 | CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY |
5999 | | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); |
6000 | CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE |
6001 | | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) |
6002 | | RXDCTL_WTHRESH(1)); |
6003 | CSR_WRITE(sc, WMREG_RDH(qid), 0); |
6004 | CSR_WRITE(sc, WMREG_RDT(qid), 0); |
6005 | } else { |
6006 | CSR_WRITE(sc, WMREG_RDH(qid), 0); |
6007 | CSR_WRITE(sc, WMREG_RDT(qid), 0); |
6008 | /* ITR / 4 */ |
6009 | CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD); |
			/* MUST be the same value as RDTR */
6011 | CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4); |
6012 | CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) | |
6013 | RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); |
6014 | } |
6015 | } |
6016 | } |
6017 | |
6018 | static int |
6019 | wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) |
6020 | { |
6021 | struct wm_rxsoft *rxs; |
6022 | int error, i; |
6023 | |
6024 | KASSERT(mutex_owned(rxq->rxq_lock)); |
6025 | |
6026 | for (i = 0; i < WM_NRXDESC; i++) { |
6027 | rxs = &rxq->rxq_soft[i]; |
6028 | if (rxs->rxs_mbuf == NULL) { |
6029 | if ((error = wm_add_rxbuf(rxq, i)) != 0) { |
6030 | log(LOG_ERR, "%s: unable to allocate or map " |
6031 | "rx buffer %d, error = %d\n" , |
6032 | device_xname(sc->sc_dev), i, error); |
6033 | /* |
6034 | * XXX Should attempt to run with fewer receive |
6035 | * XXX buffers instead of just failing. |
6036 | */ |
6037 | wm_rxdrain(rxq); |
6038 | return ENOMEM; |
6039 | } |
6040 | } else { |
6041 | if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) |
6042 | wm_init_rxdesc(rxq, i); |
6043 | /* |
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
6047 | */ |
6048 | } |
6049 | } |
6050 | rxq->rxq_ptr = 0; |
6051 | rxq->rxq_discard = 0; |
6052 | WM_RXCHAIN_RESET(rxq); |
6053 | |
6054 | return 0; |
6055 | } |
6056 | |
6057 | static int |
6058 | wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq, |
6059 | struct wm_rxqueue *rxq) |
6060 | { |
6061 | |
6062 | KASSERT(mutex_owned(rxq->rxq_lock)); |
6063 | |
6064 | /* |
6065 | * Set up some register offsets that are different between |
6066 | * the i82542 and the i82543 and later chips. |
6067 | */ |
6068 | if (sc->sc_type < WM_T_82543) |
6069 | rxq->rxq_rdt_reg = WMREG_OLD_RDT0; |
6070 | else |
6071 | rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id); |
6072 | |
6073 | wm_init_rx_regs(sc, wmq, rxq); |
6074 | return wm_init_rx_buffer(sc, rxq); |
6075 | } |
6076 | |
6077 | /* |
 * wm_init_txrx_queues:
6079 | * Initialize {tx,rx}descs and {tx,rx} buffers |
6080 | */ |
6081 | static int |
6082 | wm_init_txrx_queues(struct wm_softc *sc) |
6083 | { |
6084 | int i, error = 0; |
6085 | |
	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6087 | device_xname(sc->sc_dev), __func__)); |
6088 | |
6089 | for (i = 0; i < sc->sc_nqueues; i++) { |
6090 | struct wm_queue *wmq = &sc->sc_queue[i]; |
6091 | struct wm_txqueue *txq = &wmq->wmq_txq; |
6092 | struct wm_rxqueue *rxq = &wmq->wmq_rxq; |
6093 | |
6094 | mutex_enter(txq->txq_lock); |
6095 | wm_init_tx_queue(sc, wmq, txq); |
6096 | mutex_exit(txq->txq_lock); |
6097 | |
6098 | mutex_enter(rxq->rxq_lock); |
6099 | error = wm_init_rx_queue(sc, wmq, rxq); |
6100 | mutex_exit(rxq->rxq_lock); |
6101 | if (error) |
6102 | break; |
6103 | } |
6104 | |
6105 | return error; |
6106 | } |
6107 | |
6108 | /* |
6109 | * wm_tx_offload: |
6110 | * |
6111 | * Set up TCP/IP checksumming parameters for the |
6112 | * specified packet. |
6113 | */ |
6114 | static int |
6115 | wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, |
6116 | uint8_t *fieldsp) |
6117 | { |
6118 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
6119 | struct mbuf *m0 = txs->txs_mbuf; |
6120 | struct livengood_tcpip_ctxdesc *t; |
6121 | uint32_t ipcs, tucs, cmd, cmdlen, seg; |
6122 | uint32_t ipcse; |
6123 | struct ether_header *eh; |
6124 | int offset, iphl; |
6125 | uint8_t fields; |
6126 | |
6127 | /* |
6128 | * XXX It would be nice if the mbuf pkthdr had offset |
6129 | * fields for the protocol headers. |
6130 | */ |
6131 | |
6132 | eh = mtod(m0, struct ether_header *); |
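	/*
	 * ether_type is in network byte order; byte-swapping it (htons
	 * and ntohs are the same operation) lets us compare against the
	 * host-order ETHERTYPE_ constants.
	 */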
6133 | switch (htons(eh->ether_type)) { |
6134 | case ETHERTYPE_IP: |
6135 | case ETHERTYPE_IPV6: |
6136 | offset = ETHER_HDR_LEN; |
6137 | break; |
6138 | |
6139 | case ETHERTYPE_VLAN: |
6140 | offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
6141 | break; |
6142 | |
6143 | default: |
6144 | /* |
6145 | * Don't support this protocol or encapsulation. |
6146 | */ |
6147 | *fieldsp = 0; |
6148 | *cmdp = 0; |
6149 | return 0; |
6150 | } |
6151 | |
6152 | if ((m0->m_pkthdr.csum_flags & |
6153 | (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) { |
6154 | iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); |
6155 | } else { |
6156 | iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); |
6157 | } |
6158 | ipcse = offset + iphl - 1; |
6159 | |
6160 | cmd = WTX_CMD_DEXT | WTX_DTYP_D; |
6161 | cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; |
6162 | seg = 0; |
6163 | fields = 0; |
6164 | |
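	/*
	 * For TSO, the hardware regenerates the IP length and TCP
	 * checksum for each segment; below we zero ip_len/ip6_plen and
	 * seed th_sum with only the pseudo-header sum so the hardware
	 * can fill in the rest.
	 */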
6165 | if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { |
6166 | int hlen = offset + iphl; |
6167 | bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; |
6168 | |
6169 | if (__predict_false(m0->m_len < |
6170 | (hlen + sizeof(struct tcphdr)))) { |
6171 | /* |
6172 | * TCP/IP headers are not in the first mbuf; we need |
6173 | * to do this the slow and painful way. Let's just |
6174 | * hope this doesn't happen very often. |
6175 | */ |
6176 | struct tcphdr th; |
6177 | |
6178 | WM_Q_EVCNT_INCR(txq, txtsopain); |
6179 | |
6180 | m_copydata(m0, hlen, sizeof(th), &th); |
6181 | if (v4) { |
6182 | struct ip ip; |
6183 | |
6184 | m_copydata(m0, offset, sizeof(ip), &ip); |
6185 | ip.ip_len = 0; |
6186 | m_copyback(m0, |
6187 | offset + offsetof(struct ip, ip_len), |
6188 | sizeof(ip.ip_len), &ip.ip_len); |
6189 | th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, |
6190 | ip.ip_dst.s_addr, htons(IPPROTO_TCP)); |
6191 | } else { |
6192 | struct ip6_hdr ip6; |
6193 | |
6194 | m_copydata(m0, offset, sizeof(ip6), &ip6); |
6195 | ip6.ip6_plen = 0; |
6196 | m_copyback(m0, |
6197 | offset + offsetof(struct ip6_hdr, ip6_plen), |
6198 | sizeof(ip6.ip6_plen), &ip6.ip6_plen); |
6199 | th.th_sum = in6_cksum_phdr(&ip6.ip6_src, |
6200 | &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); |
6201 | } |
6202 | m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), |
6203 | sizeof(th.th_sum), &th.th_sum); |
6204 | |
6205 | hlen += th.th_off << 2; |
6206 | } else { |
6207 | /* |
6208 | * TCP/IP headers are in the first mbuf; we can do |
6209 | * this the easy way. |
6210 | */ |
6211 | struct tcphdr *th; |
6212 | |
6213 | if (v4) { |
6214 | struct ip *ip = |
6215 | (void *)(mtod(m0, char *) + offset); |
6216 | th = (void *)(mtod(m0, char *) + hlen); |
6217 | |
6218 | ip->ip_len = 0; |
6219 | th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, |
6220 | ip->ip_dst.s_addr, htons(IPPROTO_TCP)); |
6221 | } else { |
6222 | struct ip6_hdr *ip6 = |
6223 | (void *)(mtod(m0, char *) + offset); |
6224 | th = (void *)(mtod(m0, char *) + hlen); |
6225 | |
6226 | ip6->ip6_plen = 0; |
6227 | th->th_sum = in6_cksum_phdr(&ip6->ip6_src, |
6228 | &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); |
6229 | } |
6230 | hlen += th->th_off << 2; |
6231 | } |
6232 | |
6233 | if (v4) { |
6234 | WM_Q_EVCNT_INCR(txq, txtso); |
6235 | cmdlen |= WTX_TCPIP_CMD_IP; |
6236 | } else { |
6237 | WM_Q_EVCNT_INCR(txq, txtso6); |
6238 | ipcse = 0; |
6239 | } |
6240 | cmd |= WTX_TCPIP_CMD_TSE; |
6241 | cmdlen |= WTX_TCPIP_CMD_TSE | |
6242 | WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); |
6243 | seg = WTX_TCPIP_SEG_HDRLEN(hlen) | |
6244 | WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); |
6245 | } |
6246 | |
6247 | /* |
6248 | * NOTE: Even if we're not using the IP or TCP/UDP checksum |
6249 | * offload feature, if we load the context descriptor, we |
6250 | * MUST provide valid values for IPCSS and TUCSS fields. |
6251 | */ |
6252 | |
6253 | ipcs = WTX_TCPIP_IPCSS(offset) | |
6254 | WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | |
6255 | WTX_TCPIP_IPCSE(ipcse); |
6256 | if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) { |
6257 | WM_Q_EVCNT_INCR(txq, txipsum); |
6258 | fields |= WTX_IXSM; |
6259 | } |
6260 | |
6261 | offset += iphl; |
6262 | |
6263 | if (m0->m_pkthdr.csum_flags & |
6264 | (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) { |
6265 | WM_Q_EVCNT_INCR(txq, txtusum); |
6266 | fields |= WTX_TXSM; |
6267 | tucs = WTX_TCPIP_TUCSS(offset) | |
6268 | WTX_TCPIP_TUCSO(offset + |
6269 | M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | |
6270 | WTX_TCPIP_TUCSE(0) /* rest of packet */; |
6271 | } else if ((m0->m_pkthdr.csum_flags & |
6272 | (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) { |
6273 | WM_Q_EVCNT_INCR(txq, txtusum6); |
6274 | fields |= WTX_TXSM; |
6275 | tucs = WTX_TCPIP_TUCSS(offset) | |
6276 | WTX_TCPIP_TUCSO(offset + |
6277 | M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | |
6278 | WTX_TCPIP_TUCSE(0) /* rest of packet */; |
6279 | } else { |
6280 | /* Just initialize it to a valid TCP context. */ |
6281 | tucs = WTX_TCPIP_TUCSS(offset) | |
6282 | WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | |
6283 | WTX_TCPIP_TUCSE(0) /* rest of packet */; |
6284 | } |
6285 | |
6286 | /* Fill in the context descriptor. */ |
6287 | t = (struct livengood_tcpip_ctxdesc *) |
6288 | &txq->txq_descs[txq->txq_next]; |
6289 | t->tcpip_ipcs = htole32(ipcs); |
6290 | t->tcpip_tucs = htole32(tucs); |
6291 | t->tcpip_cmdlen = htole32(cmdlen); |
6292 | t->tcpip_seg = htole32(seg); |
6293 | wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); |
6294 | |
6295 | txq->txq_next = WM_NEXTTX(txq, txq->txq_next); |
6296 | txs->txs_ndesc++; |
6297 | |
6298 | *cmdp = cmd; |
6299 | *fieldsp = fields; |
6300 | |
6301 | return 0; |
6302 | } |
6303 | |
6304 | /* |
6305 | * wm_start: [ifnet interface function] |
6306 | * |
6307 | * Start packet transmission on the interface. |
6308 | */ |
6309 | static void |
6310 | wm_start(struct ifnet *ifp) |
6311 | { |
6312 | struct wm_softc *sc = ifp->if_softc; |
6313 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
6314 | |
6315 | KASSERT(ifp->if_extflags & IFEF_START_MPSAFE); |
6316 | |
6317 | mutex_enter(txq->txq_lock); |
6318 | if (!txq->txq_stopping) |
6319 | wm_start_locked(ifp); |
6320 | mutex_exit(txq->txq_lock); |
6321 | } |
6322 | |
6323 | static void |
6324 | wm_start_locked(struct ifnet *ifp) |
6325 | { |
6326 | struct wm_softc *sc = ifp->if_softc; |
6327 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
6328 | struct mbuf *m0; |
6329 | struct m_tag *mtag; |
6330 | struct wm_txsoft *txs; |
6331 | bus_dmamap_t dmamap; |
6332 | int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; |
6333 | bus_addr_t curaddr; |
6334 | bus_size_t seglen, curlen; |
6335 | uint32_t cksumcmd; |
6336 | uint8_t cksumfields; |
6337 | |
6338 | KASSERT(mutex_owned(txq->txq_lock)); |
6339 | |
6340 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
6341 | return; |
6342 | |
6343 | /* Remember the previous number of free descriptors. */ |
6344 | ofree = txq->txq_free; |
6345 | |
6346 | /* |
6347 | * Loop through the send queue, setting up transmit descriptors |
6348 | * until we drain the queue, or use up all available transmit |
6349 | * descriptors. |
6350 | */ |
6351 | for (;;) { |
6352 | m0 = NULL; |
6353 | |
6354 | /* Get a work queue entry. */ |
6355 | if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { |
6356 | wm_txeof(sc, txq); |
6357 | if (txq->txq_sfree == 0) { |
6358 | DPRINTF(WM_DEBUG_TX, |
6359 | ("%s: TX: no free job descriptors\n" , |
6360 | device_xname(sc->sc_dev))); |
6361 | WM_Q_EVCNT_INCR(txq, txsstall); |
6362 | break; |
6363 | } |
6364 | } |
6365 | |
6366 | /* Grab a packet off the queue. */ |
6367 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
6368 | if (m0 == NULL) |
6369 | break; |
6370 | |
6371 | DPRINTF(WM_DEBUG_TX, |
6372 | ("%s: TX: have packet to transmit: %p\n" , |
6373 | device_xname(sc->sc_dev), m0)); |
6374 | |
6375 | txs = &txq->txq_soft[txq->txq_snext]; |
6376 | dmamap = txs->txs_dmamap; |
6377 | |
6378 | use_tso = (m0->m_pkthdr.csum_flags & |
6379 | (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0; |
6380 | |
6381 | /* |
6382 | * So says the Linux driver: |
6383 | * The controller does a simple calculation to make sure |
6384 | * there is enough room in the FIFO before initiating the |
6385 | * DMA for each buffer. The calc is: |
6386 | * 4 = ceil(buffer len / MSS) |
6387 | * To make sure we don't overrun the FIFO, adjust the max |
6388 | * buffer len if the MSS drops. |
6389 | */ |
6390 | dmamap->dm_maxsegsz = |
6391 | (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN) |
6392 | ? m0->m_pkthdr.segsz << 2 |
6393 | : WTX_MAX_LEN; |
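		/*
		 * E.g. with TSO and an MSS of 1460, the per-segment DMA
		 * length is capped at 1460 << 2 = 5840 bytes.
		 */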
6394 | |
6395 | /* |
6396 | * Load the DMA map. If this fails, the packet either |
6397 | * didn't fit in the allotted number of segments, or we |
6398 | * were short on resources. For the too-many-segments |
6399 | * case, we simply report an error and drop the packet, |
6400 | * since we can't sanely copy a jumbo packet to a single |
6401 | * buffer. |
6402 | */ |
6403 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
6404 | BUS_DMA_WRITE | BUS_DMA_NOWAIT); |
6405 | if (error) { |
6406 | if (error == EFBIG) { |
6407 | WM_Q_EVCNT_INCR(txq, txdrop); |
6408 | log(LOG_ERR, "%s: Tx packet consumes too many " |
6409 | "DMA segments, dropping...\n" , |
6410 | device_xname(sc->sc_dev)); |
6411 | wm_dump_mbuf_chain(sc, m0); |
6412 | m_freem(m0); |
6413 | continue; |
6414 | } |
6415 | /* Short on resources, just stop for now. */ |
6416 | DPRINTF(WM_DEBUG_TX, |
6417 | ("%s: TX: dmamap load failed: %d\n" , |
6418 | device_xname(sc->sc_dev), error)); |
6419 | break; |
6420 | } |
6421 | |
6422 | segs_needed = dmamap->dm_nsegs; |
6423 | if (use_tso) { |
6424 | /* For sentinel descriptor; see below. */ |
6425 | segs_needed++; |
6426 | } |
6427 | |
6428 | /* |
6429 | * Ensure we have enough descriptors free to describe |
6430 | * the packet. Note, we always reserve one descriptor |
6431 | * at the end of the ring due to the semantics of the |
6432 | * TDT register, plus one more in the event we need |
6433 | * to load offload context. |
6434 | */ |
6435 | if (segs_needed > txq->txq_free - 2) { |
6436 | /* |
6437 | * Not enough free descriptors to transmit this |
6438 | * packet. We haven't committed anything yet, |
6439 | * so just unload the DMA map, put the packet |
			 * back on the queue, and punt.  Notify the upper
6441 | * layer that there are no more slots left. |
6442 | */ |
6443 | DPRINTF(WM_DEBUG_TX, |
6444 | ("%s: TX: need %d (%d) descriptors, have %d\n" , |
6445 | device_xname(sc->sc_dev), dmamap->dm_nsegs, |
6446 | segs_needed, txq->txq_free - 1)); |
6447 | ifp->if_flags |= IFF_OACTIVE; |
6448 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
6449 | WM_Q_EVCNT_INCR(txq, txdstall); |
6450 | break; |
6451 | } |
6452 | |
6453 | /* |
6454 | * Check for 82547 Tx FIFO bug. We need to do this |
6455 | * once we know we can transmit the packet, since we |
6456 | * do some internal FIFO space accounting here. |
6457 | */ |
6458 | if (sc->sc_type == WM_T_82547 && |
6459 | wm_82547_txfifo_bugchk(sc, m0)) { |
6460 | DPRINTF(WM_DEBUG_TX, |
6461 | ("%s: TX: 82547 Tx FIFO bug detected\n" , |
6462 | device_xname(sc->sc_dev))); |
6463 | ifp->if_flags |= IFF_OACTIVE; |
6464 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
6465 | WM_Q_EVCNT_INCR(txq, txfifo_stall); |
6466 | break; |
6467 | } |
6468 | |
6469 | /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ |
6470 | |
6471 | DPRINTF(WM_DEBUG_TX, |
6472 | ("%s: TX: packet has %d (%d) DMA segments\n" , |
6473 | device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); |
6474 | |
6475 | WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); |
6476 | |
6477 | /* |
6478 | * Store a pointer to the packet so that we can free it |
6479 | * later. |
6480 | * |
6481 | * Initially, we consider the number of descriptors the |
6482 | * packet uses the number of DMA segments. This may be |
6483 | * incremented by 1 if we do checksum offload (a descriptor |
6484 | * is used to set the checksum context). |
6485 | */ |
6486 | txs->txs_mbuf = m0; |
6487 | txs->txs_firstdesc = txq->txq_next; |
6488 | txs->txs_ndesc = segs_needed; |
6489 | |
6490 | /* Set up offload parameters for this packet. */ |
6491 | if (m0->m_pkthdr.csum_flags & |
6492 | (M_CSUM_TSOv4 | M_CSUM_TSOv6 | |
6493 | M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | |
6494 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { |
6495 | if (wm_tx_offload(sc, txs, &cksumcmd, |
6496 | &cksumfields) != 0) { |
6497 | /* Error message already displayed. */ |
6498 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
6499 | continue; |
6500 | } |
6501 | } else { |
6502 | cksumcmd = 0; |
6503 | cksumfields = 0; |
6504 | } |
6505 | |
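/*
 * Every descriptor gets IDE (delay the Tx interrupt) and IFCS
 * (let the MAC insert the frame checksum) in addition to any
 * offload command bits set above.
 */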
6506 | cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; |
6507 | |
6508 | /* Sync the DMA map. */ |
6509 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
6510 | BUS_DMASYNC_PREWRITE); |
6511 | |
6512 | /* Initialize the transmit descriptor. */ |
6513 | for (nexttx = txq->txq_next, seg = 0; |
6514 | seg < dmamap->dm_nsegs; seg++) { |
6515 | for (seglen = dmamap->dm_segs[seg].ds_len, |
6516 | curaddr = dmamap->dm_segs[seg].ds_addr; |
6517 | seglen != 0; |
6518 | curaddr += curlen, seglen -= curlen, |
6519 | nexttx = WM_NEXTTX(txq, nexttx)) { |
6520 | curlen = seglen; |
6521 | |
6522 | /* |
6523 | * So says the Linux driver: |
6524 | * Workaround for premature descriptor |
6525 | * write-backs in TSO mode: append a |
6526 | * 4-byte sentinel descriptor. |
6527 | */ |
6528 | if (use_tso && seg == dmamap->dm_nsegs - 1 && |
6529 | curlen > 8) |
6530 | curlen -= 4; |
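/*
 * The 4 bytes shaved off here leave the inner loop one extra
 * iteration, and that final 4-byte descriptor is the sentinel.
 */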
6531 | |
6532 | wm_set_dma_addr( |
6533 | &txq->txq_descs[nexttx].wtx_addr, curaddr); |
6534 | txq->txq_descs[nexttx].wtx_cmdlen |
6535 | = htole32(cksumcmd | curlen); |
6536 | txq->txq_descs[nexttx].wtx_fields.wtxu_status |
6537 | = 0; |
6538 | txq->txq_descs[nexttx].wtx_fields.wtxu_options |
6539 | = cksumfields; |
6540 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0; |
6541 | lasttx = nexttx; |
6542 | |
6543 | DPRINTF(WM_DEBUG_TX, |
6544 | ("%s: TX: desc %d: low %#" PRIx64 ", " |
6545 | "len %#04zx\n" , |
6546 | device_xname(sc->sc_dev), nexttx, |
6547 | (uint64_t)curaddr, curlen)); |
6548 | } |
6549 | } |
6550 | |
6551 | KASSERT(lasttx != -1); |
6552 | |
6553 | /* |
6554 | * Set up the command byte on the last descriptor of |
6555 | * the packet. If we're in the interrupt delay window, |
6556 | * delay the interrupt. |
6557 | */ |
6558 | txq->txq_descs[lasttx].wtx_cmdlen |= |
6559 | htole32(WTX_CMD_EOP | WTX_CMD_RS); |
6560 | |
6561 | /* |
6562 | * If VLANs are enabled and the packet has a VLAN tag, set |
6563 | * up the descriptor to encapsulate the packet for us. |
6564 | * |
6565 | * This is only valid on the last descriptor of the packet. |
6566 | */ |
6567 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { |
6568 | txq->txq_descs[lasttx].wtx_cmdlen |= |
6569 | htole32(WTX_CMD_VLE); |
6570 | txq->txq_descs[lasttx].wtx_fields.wtxu_vlan |
6571 | = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); |
6572 | } |
6573 | |
6574 | txs->txs_lastdesc = lasttx; |
6575 | |
6576 | DPRINTF(WM_DEBUG_TX, |
6577 | ("%s: TX: desc %d: cmdlen 0x%08x\n" , |
6578 | device_xname(sc->sc_dev), |
6579 | lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); |
6580 | |
6581 | /* Sync the descriptors we're using. */ |
6582 | wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, |
6583 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
6584 | |
6585 | /* Give the packet to the chip. */ |
6586 | CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); |
6587 | |
6588 | DPRINTF(WM_DEBUG_TX, |
6589 | ("%s: TX: TDT -> %d\n" , device_xname(sc->sc_dev), nexttx)); |
6590 | |
6591 | DPRINTF(WM_DEBUG_TX, |
6592 | ("%s: TX: finished transmitting packet, job %d\n" , |
6593 | device_xname(sc->sc_dev), txq->txq_snext)); |
6594 | |
6595 | /* Advance the tx pointer. */ |
6596 | txq->txq_free -= txs->txs_ndesc; |
6597 | txq->txq_next = nexttx; |
6598 | |
6599 | txq->txq_sfree--; |
6600 | txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); |
6601 | |
6602 | /* Pass the packet to any BPF listeners. */ |
6603 | bpf_mtap(ifp, m0); |
6604 | } |
6605 | |
6606 | if (m0 != NULL) { |
6607 | ifp->if_flags |= IFF_OACTIVE; |
6608 | WM_Q_EVCNT_INCR(txq, txdrop); |
6609 | DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n" , |
6610 | __func__)); |
6611 | m_freem(m0); |
6612 | } |
6613 | |
6614 | if (txq->txq_sfree == 0 || txq->txq_free <= 2) { |
6615 | /* No more slots; notify upper layer. */ |
6616 | ifp->if_flags |= IFF_OACTIVE; |
6617 | } |
6618 | |
6619 | if (txq->txq_free != ofree) { |
6620 | /* Set a watchdog timer in case the chip flakes out. */ |
6621 | ifp->if_timer = 5; |
6622 | } |
6623 | } |
6624 | |
6625 | /* |
6626 | * wm_nq_tx_offload: |
6627 | * |
6628 | * Set up TCP/IP checksumming parameters for the |
6629 | * specified packet, for NEWQUEUE devices. |
6630 | */ |
6631 | static int |
6632 | wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, |
6633 | struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) |
6634 | { |
6635 | struct mbuf *m0 = txs->txs_mbuf; |
6636 | struct m_tag *mtag; |
6637 | uint32_t vl_len, mssidx, cmdc; |
6638 | struct ether_header *eh; |
6639 | int offset, iphl; |
6640 | |
6641 | /* |
6642 | * XXX It would be nice if the mbuf pkthdr had offset |
6643 | * fields for the protocol headers. |
6644 | */ |
6645 | *cmdlenp = 0; |
6646 | *fieldsp = 0; |
6647 | |
6648 | eh = mtod(m0, struct ether_header *); |
6649 | switch (htons(eh->ether_type)) { |
6650 | case ETHERTYPE_IP: |
6651 | case ETHERTYPE_IPV6: |
6652 | offset = ETHER_HDR_LEN; |
6653 | break; |
6654 | |
6655 | case ETHERTYPE_VLAN: |
6656 | offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
6657 | break; |
6658 | |
6659 | default: |
6660 | /* Don't support this protocol or encapsulation. */ |
6661 | *do_csum = false; |
6662 | return 0; |
6663 | } |
6664 | *do_csum = true; |
6665 | *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; |
6666 | cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; |
6667 | |
6668 | vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); |
6669 | KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); |
6670 | |
6671 | if ((m0->m_pkthdr.csum_flags & |
6672 | (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { |
6673 | iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); |
6674 | } else { |
6675 | iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); |
6676 | } |
6677 | vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); |
6678 | KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); |
6679 | |
6680 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { |
6681 | vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK) |
6682 | << NQTXC_VLLEN_VLAN_SHIFT); |
6683 | *cmdlenp |= NQTX_CMD_VLE; |
6684 | } |
6685 | |
6686 | mssidx = 0; |
6687 | |
6688 | if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { |
6689 | int hlen = offset + iphl; |
6690 | int tcp_hlen; |
6691 | bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; |
6692 | |
6693 | if (__predict_false(m0->m_len < |
6694 | (hlen + sizeof(struct tcphdr)))) { |
6695 | /* |
6696 | * TCP/IP headers are not in the first mbuf; we need |
6697 | * to do this the slow and painful way. Let's just |
6698 | * hope this doesn't happen very often. |
6699 | */ |
6700 | struct tcphdr th; |
6701 | |
6702 | WM_Q_EVCNT_INCR(txq, txtsopain); |
6703 | |
6704 | m_copydata(m0, hlen, sizeof(th), &th); |
6705 | if (v4) { |
6706 | struct ip ip; |
6707 | |
6708 | m_copydata(m0, offset, sizeof(ip), &ip); |
6709 | ip.ip_len = 0; |
6710 | m_copyback(m0, |
6711 | offset + offsetof(struct ip, ip_len), |
6712 | sizeof(ip.ip_len), &ip.ip_len); |
6713 | th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, |
6714 | ip.ip_dst.s_addr, htons(IPPROTO_TCP)); |
6715 | } else { |
6716 | struct ip6_hdr ip6; |
6717 | |
6718 | m_copydata(m0, offset, sizeof(ip6), &ip6); |
6719 | ip6.ip6_plen = 0; |
6720 | m_copyback(m0, |
6721 | offset + offsetof(struct ip6_hdr, ip6_plen), |
6722 | sizeof(ip6.ip6_plen), &ip6.ip6_plen); |
6723 | th.th_sum = in6_cksum_phdr(&ip6.ip6_src, |
6724 | &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); |
6725 | } |
6726 | m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), |
6727 | sizeof(th.th_sum), &th.th_sum); |
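/*
 * The length fields are zeroed and th_sum is seeded with the
 * pseudo-header checksum (without the length) because the
 * hardware recomputes the per-segment lengths during TSO and
 * folds them into the checksum itself.
 */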
6728 | |
6729 | tcp_hlen = th.th_off << 2; |
6730 | } else { |
6731 | /* |
6732 | * TCP/IP headers are in the first mbuf; we can do |
6733 | * this the easy way. |
6734 | */ |
6735 | struct tcphdr *th; |
6736 | |
6737 | if (v4) { |
6738 | struct ip *ip = |
6739 | (void *)(mtod(m0, char *) + offset); |
6740 | th = (void *)(mtod(m0, char *) + hlen); |
6741 | |
6742 | ip->ip_len = 0; |
6743 | th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, |
6744 | ip->ip_dst.s_addr, htons(IPPROTO_TCP)); |
6745 | } else { |
6746 | struct ip6_hdr *ip6 = |
6747 | (void *)(mtod(m0, char *) + offset); |
6748 | th = (void *)(mtod(m0, char *) + hlen); |
6749 | |
6750 | ip6->ip6_plen = 0; |
6751 | th->th_sum = in6_cksum_phdr(&ip6->ip6_src, |
6752 | &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); |
6753 | } |
6754 | tcp_hlen = th->th_off << 2; |
6755 | } |
6756 | hlen += tcp_hlen; |
6757 | *cmdlenp |= NQTX_CMD_TSE; |
6758 | |
6759 | if (v4) { |
6760 | WM_Q_EVCNT_INCR(txq, txtso); |
6761 | *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; |
6762 | } else { |
6763 | WM_Q_EVCNT_INCR(txq, txtso6); |
6764 | *fieldsp |= NQTXD_FIELDS_TUXSM; |
6765 | } |
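/*
 * For TSO, PAYLEN is the L4 payload length only (total length
 * minus L2/L3/L4 headers); in the non-TSO case below it is the
 * whole packet length.
 */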
6766 | *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); |
6767 | KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); |
6768 | mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); |
6769 | KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); |
6770 | mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); |
6771 | KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); |
6772 | } else { |
6773 | *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); |
6774 | KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); |
6775 | } |
6776 | |
6777 | if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { |
6778 | *fieldsp |= NQTXD_FIELDS_IXSM; |
6779 | cmdc |= NQTXC_CMD_IP4; |
6780 | } |
6781 | |
6782 | if (m0->m_pkthdr.csum_flags & |
6783 | (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { |
6784 | WM_Q_EVCNT_INCR(txq, txtusum); |
6785 | if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) { |
6786 | cmdc |= NQTXC_CMD_TCP; |
6787 | } else { |
6788 | cmdc |= NQTXC_CMD_UDP; |
6789 | } |
6790 | cmdc |= NQTXC_CMD_IP4; |
6791 | *fieldsp |= NQTXD_FIELDS_TUXSM; |
6792 | } |
6793 | if (m0->m_pkthdr.csum_flags & |
6794 | (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { |
6795 | WM_Q_EVCNT_INCR(txq, txtusum6); |
6796 | if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) { |
6797 | cmdc |= NQTXC_CMD_TCP; |
6798 | } else { |
6799 | cmdc |= NQTXC_CMD_UDP; |
6800 | } |
6801 | cmdc |= NQTXC_CMD_IP6; |
6802 | *fieldsp |= NQTXD_FIELDS_TUXSM; |
6803 | } |
6804 | |
6805 | /* Fill in the context descriptor. */ |
6806 | txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len = |
6807 | htole32(vl_len); |
6808 | txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0; |
6809 | txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd = |
6810 | htole32(cmdc); |
6811 | txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx = |
6812 | htole32(mssidx); |
6813 | wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); |
6814 | DPRINTF(WM_DEBUG_TX, |
6815 | ("%s: TX: context desc %d 0x%08x%08x\n" , device_xname(sc->sc_dev), |
6816 | txq->txq_next, 0, vl_len)); |
6817 | DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n" , mssidx, cmdc)); |
6818 | txq->txq_next = WM_NEXTTX(txq, txq->txq_next); |
6819 | txs->txs_ndesc++; |
6820 | return 0; |
6821 | } |
6822 | |
6823 | /* |
6824 | * wm_nq_start: [ifnet interface function] |
6825 | * |
6826 | * Start packet transmission on the interface for NEWQUEUE devices. |
6827 | */ |
6828 | static void |
6829 | wm_nq_start(struct ifnet *ifp) |
6830 | { |
6831 | struct wm_softc *sc = ifp->if_softc; |
6832 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
6833 | |
6834 | KASSERT(ifp->if_extflags & IFEF_START_MPSAFE); |
6835 | |
6836 | mutex_enter(txq->txq_lock); |
6837 | if (!txq->txq_stopping) |
6838 | wm_nq_start_locked(ifp); |
6839 | mutex_exit(txq->txq_lock); |
6840 | } |
6841 | |
6842 | static void |
6843 | wm_nq_start_locked(struct ifnet *ifp) |
6844 | { |
6845 | struct wm_softc *sc = ifp->if_softc; |
6846 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
6847 | |
6848 | wm_nq_send_common_locked(ifp, txq, false); |
6849 | } |
6850 | |
6851 | static inline int |
6852 | wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m) |
6853 | { |
6854 | struct wm_softc *sc = ifp->if_softc; |
6855 | u_int cpuid = cpu_index(curcpu()); |
6856 | |
6857 | /* |
6858 | * Currently, a simple distribution strategy. |
6859 | * TODO: |
6860 | * distribute by flow ID (RSS hash value). |
6861 | */ |
6862 | return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues; |
6863 | } |
6864 | |
6865 | static int |
6866 | wm_nq_transmit(struct ifnet *ifp, struct mbuf *m) |
6867 | { |
6868 | int qid; |
6869 | struct wm_softc *sc = ifp->if_softc; |
6870 | struct wm_txqueue *txq; |
6871 | |
6872 | qid = wm_nq_select_txqueue(ifp, m); |
6873 | txq = &sc->sc_queue[qid].wmq_txq; |
6874 | |
6875 | if (__predict_false(!pcq_put(txq->txq_interq, m))) { |
6876 | m_freem(m); |
6877 | WM_Q_EVCNT_INCR(txq, txdrop); |
6878 | return ENOBUFS; |
6879 | } |
6880 | |
6881 | if (mutex_tryenter(txq->txq_lock)) { |
6882 | /* XXXX should be per TX queue */ |
6883 | ifp->if_obytes += m->m_pkthdr.len; |
6884 | if (m->m_flags & M_MCAST) |
6885 | ifp->if_omcasts++; |
6886 | |
6887 | if (!txq->txq_stopping) |
6888 | wm_nq_transmit_locked(ifp, txq); |
6889 | mutex_exit(txq->txq_lock); |
6890 | } |
6891 | |
6892 | return 0; |
6893 | } |
6894 | |
6895 | static void |
6896 | wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq) |
6897 | { |
6898 | |
6899 | wm_nq_send_common_locked(ifp, txq, true); |
6900 | } |
6901 | |
6902 | static void |
6903 | wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq, |
6904 | bool is_transmit) |
6905 | { |
6906 | struct wm_softc *sc = ifp->if_softc; |
6907 | struct mbuf *m0; |
6908 | struct m_tag *mtag; |
6909 | struct wm_txsoft *txs; |
6910 | bus_dmamap_t dmamap; |
6911 | int error, nexttx, lasttx = -1, seg, segs_needed; |
6912 | bool do_csum, sent; |
6913 | |
6914 | KASSERT(mutex_owned(txq->txq_lock)); |
6915 | |
6916 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
6917 | return; |
6918 | if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) |
6919 | return; |
6920 | |
6921 | sent = false; |
6922 | |
6923 | /* |
6924 | * Loop through the send queue, setting up transmit descriptors |
6925 | * until we drain the queue, or use up all available transmit |
6926 | * descriptors. |
6927 | */ |
6928 | for (;;) { |
6929 | m0 = NULL; |
6930 | |
6931 | /* Get a work queue entry. */ |
6932 | if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { |
6933 | wm_txeof(sc, txq); |
6934 | if (txq->txq_sfree == 0) { |
6935 | DPRINTF(WM_DEBUG_TX, |
6936 | ("%s: TX: no free job descriptors\n" , |
6937 | device_xname(sc->sc_dev))); |
6938 | WM_Q_EVCNT_INCR(txq, txsstall); |
6939 | break; |
6940 | } |
6941 | } |
6942 | |
6943 | /* Grab a packet off the queue. */ |
6944 | if (is_transmit) |
6945 | m0 = pcq_get(txq->txq_interq); |
6946 | else |
6947 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
6948 | if (m0 == NULL) |
6949 | break; |
6950 | |
6951 | DPRINTF(WM_DEBUG_TX, |
6952 | ("%s: TX: have packet to transmit: %p\n" , |
6953 | device_xname(sc->sc_dev), m0)); |
6954 | |
6955 | txs = &txq->txq_soft[txq->txq_snext]; |
6956 | dmamap = txs->txs_dmamap; |
6957 | |
6958 | /* |
6959 | * Load the DMA map. If this fails, the packet either |
6960 | * didn't fit in the allotted number of segments, or we |
6961 | * were short on resources. For the too-many-segments |
6962 | * case, we simply report an error and drop the packet, |
6963 | * since we can't sanely copy a jumbo packet to a single |
6964 | * buffer. |
6965 | */ |
6966 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
6967 | BUS_DMA_WRITE | BUS_DMA_NOWAIT); |
6968 | if (error) { |
6969 | if (error == EFBIG) { |
6970 | WM_Q_EVCNT_INCR(txq, txdrop); |
6971 | log(LOG_ERR, "%s: Tx packet consumes too many " |
6972 | "DMA segments, dropping...\n" , |
6973 | device_xname(sc->sc_dev)); |
6974 | wm_dump_mbuf_chain(sc, m0); |
6975 | m_freem(m0); |
6976 | continue; |
6977 | } |
6978 | /* Short on resources, just stop for now. */ |
6979 | DPRINTF(WM_DEBUG_TX, |
6980 | ("%s: TX: dmamap load failed: %d\n" , |
6981 | device_xname(sc->sc_dev), error)); |
6982 | break; |
6983 | } |
6984 | |
6985 | segs_needed = dmamap->dm_nsegs; |
6986 | |
6987 | /* |
6988 | * Ensure we have enough descriptors free to describe |
6989 | * the packet. Note, we always reserve one descriptor |
6990 | * at the end of the ring due to the semantics of the |
6991 | * TDT register, plus one more in the event we need |
6992 | * to load offload context. |
6993 | */ |
6994 | if (segs_needed > txq->txq_free - 2) { |
6995 | /* |
6996 | * Not enough free descriptors to transmit this |
6997 | * packet. We haven't committed anything yet, |
6998 | * so just unload the DMA map, put the packet |
6999 | * back on the queue, and punt. Notify the upper |
7000 | * layer that there are no more slots left. |
7001 | */ |
7002 | DPRINTF(WM_DEBUG_TX, |
7003 | ("%s: TX: need %d (%d) descriptors, have %d\n" , |
7004 | device_xname(sc->sc_dev), dmamap->dm_nsegs, |
7005 | segs_needed, txq->txq_free - 1)); |
7006 | txq->txq_flags |= WM_TXQ_NO_SPACE; |
7007 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
7008 | WM_Q_EVCNT_INCR(txq, txdstall); |
7009 | break; |
7010 | } |
7011 | |
7012 | /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ |
7013 | |
7014 | DPRINTF(WM_DEBUG_TX, |
7015 | ("%s: TX: packet has %d (%d) DMA segments\n" , |
7016 | device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); |
7017 | |
7018 | WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); |
7019 | |
7020 | /* |
7021 | * Store a pointer to the packet so that we can free it |
7022 | * later. |
7023 | * |
7024 | * Initially, we consider the number of descriptors the |
7025 | * packet uses the number of DMA segments. This may be |
7026 | * incremented by 1 if we do checksum offload (a descriptor |
7027 | * is used to set the checksum context). |
7028 | */ |
7029 | txs->txs_mbuf = m0; |
7030 | txs->txs_firstdesc = txq->txq_next; |
7031 | txs->txs_ndesc = segs_needed; |
7032 | |
7033 | /* Set up offload parameters for this packet. */ |
7034 | uint32_t cmdlen, fields, dcmdlen; |
7035 | if (m0->m_pkthdr.csum_flags & |
7036 | (M_CSUM_TSOv4 | M_CSUM_TSOv6 | |
7037 | M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | |
7038 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { |
7039 | if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields, |
7040 | &do_csum) != 0) { |
7041 | /* Error message already displayed. */ |
7042 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
7043 | continue; |
7044 | } |
7045 | } else { |
7046 | do_csum = false; |
7047 | cmdlen = 0; |
7048 | fields = 0; |
7049 | } |
7050 | |
7051 | /* Sync the DMA map. */ |
7052 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
7053 | BUS_DMASYNC_PREWRITE); |
7054 | |
7055 | /* Initialize the first transmit descriptor. */ |
7056 | nexttx = txq->txq_next; |
7057 | if (!do_csum) { |
7058 | /* setup a legacy descriptor */ |
7059 | wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr, |
7060 | dmamap->dm_segs[0].ds_addr); |
7061 | txq->txq_descs[nexttx].wtx_cmdlen = |
7062 | htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); |
7063 | txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; |
7064 | txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; |
7065 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != |
7066 | NULL) { |
7067 | txq->txq_descs[nexttx].wtx_cmdlen |= |
7068 | htole32(WTX_CMD_VLE); |
7069 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = |
7070 | htole16(VLAN_TAG_VALUE(mtag) & 0xffff); |
7071 | } else { |
7072 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0; |
7073 | } |
7074 | dcmdlen = 0; |
7075 | } else { |
7076 | /* setup an advanced data descriptor */ |
7077 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = |
7078 | htole64(dmamap->dm_segs[0].ds_addr); |
7079 | KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); |
7080 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = |
7081 | htole32(dmamap->dm_segs[0].ds_len | cmdlen); |
7082 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = |
7083 | htole32(fields); |
7084 | DPRINTF(WM_DEBUG_TX, |
7085 | ("%s: TX: adv data desc %d 0x%" PRIx64 "\n" , |
7086 | device_xname(sc->sc_dev), nexttx, |
7087 | (uint64_t)dmamap->dm_segs[0].ds_addr)); |
7088 | DPRINTF(WM_DEBUG_TX, |
7089 | ("\t 0x%08x%08x\n" , fields, |
7090 | (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); |
7091 | dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; |
7092 | } |
7093 | |
7094 | lasttx = nexttx; |
7095 | nexttx = WM_NEXTTX(txq, nexttx); |
7096 | /* |
7097 | * Fill in the remaining descriptors. The legacy and advanced |
7098 | * formats are laid out the same from here on. |
7099 | */ |
7100 | for (seg = 1; seg < dmamap->dm_nsegs; |
7101 | seg++, nexttx = WM_NEXTTX(txq, nexttx)) { |
7102 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = |
7103 | htole64(dmamap->dm_segs[seg].ds_addr); |
7104 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = |
7105 | htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); |
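/* A segment's length must never overlap the command/type bits. */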
7106 | KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); |
7107 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0; |
7108 | lasttx = nexttx; |
7109 | |
7110 | DPRINTF(WM_DEBUG_TX, |
7111 | ("%s: TX: desc %d: %#" PRIx64 ", " |
7112 | "len %#04zx\n" , |
7113 | device_xname(sc->sc_dev), nexttx, |
7114 | (uint64_t)dmamap->dm_segs[seg].ds_addr, |
7115 | dmamap->dm_segs[seg].ds_len)); |
7116 | } |
7117 | |
7118 | KASSERT(lasttx != -1); |
7119 | |
7120 | /* |
7121 | * Set up the command byte on the last descriptor of |
7122 | * the packet. If we're in the interrupt delay window, |
7123 | * delay the interrupt. |
7124 | */ |
7125 | KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == |
7126 | (NQTX_CMD_EOP | NQTX_CMD_RS)); |
7127 | txq->txq_descs[lasttx].wtx_cmdlen |= |
7128 | htole32(WTX_CMD_EOP | WTX_CMD_RS); |
7129 | |
7130 | txs->txs_lastdesc = lasttx; |
7131 | |
7132 | DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n" , |
7133 | device_xname(sc->sc_dev), |
7134 | lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); |
7135 | |
7136 | /* Sync the descriptors we're using. */ |
7137 | wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, |
7138 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
7139 | |
7140 | /* Give the packet to the chip. */ |
7141 | CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); |
7142 | sent = true; |
7143 | |
7144 | DPRINTF(WM_DEBUG_TX, |
7145 | ("%s: TX: TDT -> %d\n" , device_xname(sc->sc_dev), nexttx)); |
7146 | |
7147 | DPRINTF(WM_DEBUG_TX, |
7148 | ("%s: TX: finished transmitting packet, job %d\n" , |
7149 | device_xname(sc->sc_dev), txq->txq_snext)); |
7150 | |
7151 | /* Advance the tx pointer. */ |
7152 | txq->txq_free -= txs->txs_ndesc; |
7153 | txq->txq_next = nexttx; |
7154 | |
7155 | txq->txq_sfree--; |
7156 | txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); |
7157 | |
7158 | /* Pass the packet to any BPF listeners. */ |
7159 | bpf_mtap(ifp, m0); |
7160 | } |
7161 | |
7162 | if (m0 != NULL) { |
7163 | txq->txq_flags |= WM_TXQ_NO_SPACE; |
7164 | WM_Q_EVCNT_INCR(txq, txdrop); |
7165 | DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n" , |
7166 | __func__)); |
7167 | m_freem(m0); |
7168 | } |
7169 | |
7170 | if (txq->txq_sfree == 0 || txq->txq_free <= 2) { |
7171 | /* No more slots; notify upper layer. */ |
7172 | txq->txq_flags |= WM_TXQ_NO_SPACE; |
7173 | } |
7174 | |
7175 | if (sent) { |
7176 | /* Set a watchdog timer in case the chip flakes out. */ |
7177 | ifp->if_timer = 5; |
7178 | } |
7179 | } |
7180 | |
7181 | /* Interrupt */ |
7182 | |
7183 | /* |
7184 | * wm_txeof: |
7185 | * |
7186 | * Helper; handle transmit interrupts. |
7187 | */ |
7188 | static int |
7189 | wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq) |
7190 | { |
7191 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
7192 | struct wm_txsoft *txs; |
7193 | bool processed = false; |
7194 | int count = 0; |
7195 | int i; |
7196 | uint8_t status; |
7197 | |
7198 | KASSERT(mutex_owned(txq->txq_lock)); |
7199 | |
7200 | if (txq->txq_stopping) |
7201 | return 0; |
7202 | |
7203 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
7204 | txq->txq_flags &= ~WM_TXQ_NO_SPACE; |
7205 | else |
7206 | ifp->if_flags &= ~IFF_OACTIVE; |
7207 | |
7208 | /* |
7209 | * Go through the Tx list and free mbufs for those |
7210 | * frames which have been transmitted. |
7211 | */ |
7212 | for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq); |
7213 | i = WM_NEXTTXS(txq, i), txq->txq_sfree++) { |
7214 | txs = &txq->txq_soft[i]; |
7215 | |
7216 | DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n" , |
7217 | device_xname(sc->sc_dev), i)); |
7218 | |
7219 | wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc, |
7220 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
7221 | |
7222 | status = |
7223 | txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status; |
7224 | if ((status & WTX_ST_DD) == 0) { |
7225 | wm_cdtxsync(txq, txs->txs_lastdesc, 1, |
7226 | BUS_DMASYNC_PREREAD); |
7227 | break; |
7228 | } |
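/*
 * Jobs complete in ring order, so stop at the first one whose
 * last descriptor has no DD (descriptor done) bit; the PREREAD
 * sync above ensures its status is re-fetched on the next call.
 */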
7229 | |
7230 | processed = true; |
7231 | count++; |
7232 | DPRINTF(WM_DEBUG_TX, |
7233 | ("%s: TX: job %d done: descs %d..%d\n" , |
7234 | device_xname(sc->sc_dev), i, txs->txs_firstdesc, |
7235 | txs->txs_lastdesc)); |
7236 | |
7237 | /* |
7238 | * XXX We should probably be using the statistics |
7239 | * XXX registers, but I don't know if they exist |
7240 | * XXX on chips before the i82544. |
7241 | */ |
7242 | |
7243 | #ifdef WM_EVENT_COUNTERS |
7244 | if (status & WTX_ST_TU) |
7245 | WM_Q_EVCNT_INCR(txq, tu); |
7246 | #endif /* WM_EVENT_COUNTERS */ |
7247 | |
7248 | if (status & (WTX_ST_EC | WTX_ST_LC)) { |
7249 | ifp->if_oerrors++; |
7250 | if (status & WTX_ST_LC) |
7251 | log(LOG_WARNING, "%s: late collision\n" , |
7252 | device_xname(sc->sc_dev)); |
7253 | else if (status & WTX_ST_EC) { |
7254 | ifp->if_collisions += 16; |
7255 | log(LOG_WARNING, "%s: excessive collisions\n" , |
7256 | device_xname(sc->sc_dev)); |
7257 | } |
7258 | } else |
7259 | ifp->if_opackets++; |
7260 | |
7261 | txq->txq_free += txs->txs_ndesc; |
7262 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, |
7263 | 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
7264 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
7265 | m_freem(txs->txs_mbuf); |
7266 | txs->txs_mbuf = NULL; |
7267 | } |
7268 | |
7269 | /* Update the dirty transmit buffer pointer. */ |
7270 | txq->txq_sdirty = i; |
7271 | DPRINTF(WM_DEBUG_TX, |
7272 | ("%s: TX: txsdirty -> %d\n" , device_xname(sc->sc_dev), i)); |
7273 | |
7274 | if (count != 0) |
7275 | rnd_add_uint32(&sc->rnd_source, count); |
7276 | |
7277 | /* |
7278 | * If there are no more pending transmissions, cancel the watchdog |
7279 | * timer. |
7280 | */ |
7281 | if (txq->txq_sfree == WM_TXQUEUELEN(txq)) |
7282 | ifp->if_timer = 0; |
7283 | |
7284 | return processed; |
7285 | } |
7286 | |
7287 | /* |
7288 | * wm_rxeof: |
7289 | * |
7290 | * Helper; handle receive interrupts. |
7291 | */ |
7292 | static void |
7293 | wm_rxeof(struct wm_rxqueue *rxq) |
7294 | { |
7295 | struct wm_softc *sc = rxq->rxq_sc; |
7296 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
7297 | struct wm_rxsoft *rxs; |
7298 | struct mbuf *m; |
7299 | int i, len; |
7300 | int count = 0; |
7301 | uint8_t status, errors; |
7302 | uint16_t vlantag; |
7303 | |
7304 | KASSERT(mutex_owned(rxq->rxq_lock)); |
7305 | |
7306 | for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { |
7307 | rxs = &rxq->rxq_soft[i]; |
7308 | |
7309 | DPRINTF(WM_DEBUG_RX, |
7310 | ("%s: RX: checking descriptor %d\n" , |
7311 | device_xname(sc->sc_dev), i)); |
7312 | |
7313 | wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
7314 | |
7315 | status = rxq->rxq_descs[i].wrx_status; |
7316 | errors = rxq->rxq_descs[i].wrx_errors; |
7317 | len = le16toh(rxq->rxq_descs[i].wrx_len); |
7318 | vlantag = rxq->rxq_descs[i].wrx_special; |
7319 | |
7320 | if ((status & WRX_ST_DD) == 0) { |
7321 | /* We have processed all of the receive descriptors. */ |
7322 | wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD); |
7323 | break; |
7324 | } |
7325 | |
7326 | count++; |
7327 | if (__predict_false(rxq->rxq_discard)) { |
7328 | DPRINTF(WM_DEBUG_RX, |
7329 | ("%s: RX: discarding contents of descriptor %d\n" , |
7330 | device_xname(sc->sc_dev), i)); |
7331 | wm_init_rxdesc(rxq, i); |
7332 | if (status & WRX_ST_EOP) { |
7333 | /* Reset our state. */ |
7334 | DPRINTF(WM_DEBUG_RX, |
7335 | ("%s: RX: resetting rxdiscard -> 0\n" , |
7336 | device_xname(sc->sc_dev))); |
7337 | rxq->rxq_discard = 0; |
7338 | } |
7339 | continue; |
7340 | } |
7341 | |
7342 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
7343 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
7344 | |
7345 | m = rxs->rxs_mbuf; |
7346 | |
7347 | /* |
7348 | * Add a new receive buffer to the ring, unless of |
7349 | * course the length is zero. Treat the latter as a |
7350 | * failed mapping. |
7351 | */ |
7352 | if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) { |
7353 | /* |
7354 | * Failed, throw away what we've done so |
7355 | * far, and discard the rest of the packet. |
7356 | */ |
7357 | ifp->if_ierrors++; |
7358 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
7359 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
7360 | wm_init_rxdesc(rxq, i); |
7361 | if ((status & WRX_ST_EOP) == 0) |
7362 | rxq->rxq_discard = 1; |
7363 | if (rxq->rxq_head != NULL) |
7364 | m_freem(rxq->rxq_head); |
7365 | WM_RXCHAIN_RESET(rxq); |
7366 | DPRINTF(WM_DEBUG_RX, |
7367 | ("%s: RX: Rx buffer allocation failed, " |
7368 | "dropping packet%s\n" , device_xname(sc->sc_dev), |
7369 | rxq->rxq_discard ? " (discard)" : "" )); |
7370 | continue; |
7371 | } |
7372 | |
7373 | m->m_len = len; |
7374 | rxq->rxq_len += len; |
7375 | DPRINTF(WM_DEBUG_RX, |
7376 | ("%s: RX: buffer at %p len %d\n" , |
7377 | device_xname(sc->sc_dev), m->m_data, len)); |
7378 | |
7379 | /* If this is not the end of the packet, keep looking. */ |
7380 | if ((status & WRX_ST_EOP) == 0) { |
7381 | WM_RXCHAIN_LINK(rxq, m); |
7382 | DPRINTF(WM_DEBUG_RX, |
7383 | ("%s: RX: not yet EOP, rxlen -> %d\n" , |
7384 | device_xname(sc->sc_dev), rxq->rxq_len)); |
7385 | continue; |
7386 | } |
7387 | |
7388 | /* |
7389 | * Okay, we have the entire packet now. The chip is |
7390 | * configured to include the FCS except on the I350, I354 |
7391 | * and I21[01] (not all chips can be configured to strip |
7392 | * it), so we need to trim it. We may need to adjust the |
7393 | * length of the previous mbuf in the chain if the current |
7394 | * mbuf is too short. Due to an erratum, the RCTL_SECRC |
7395 | * bit in the RCTL register is always set on the I350, so |
7396 | * we don't trim the FCS on those chips. |
7397 | */ |
7398 | if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354) |
7399 | && (sc->sc_type != WM_T_I210) |
7400 | && (sc->sc_type != WM_T_I211)) { |
7401 | if (m->m_len < ETHER_CRC_LEN) { |
7402 | rxq->rxq_tail->m_len |
7403 | -= (ETHER_CRC_LEN - m->m_len); |
7404 | m->m_len = 0; |
7405 | } else |
7406 | m->m_len -= ETHER_CRC_LEN; |
7407 | len = rxq->rxq_len - ETHER_CRC_LEN; |
7408 | } else |
7409 | len = rxq->rxq_len; |
7410 | |
7411 | WM_RXCHAIN_LINK(rxq, m); |
7412 | |
7413 | *rxq->rxq_tailp = NULL; |
7414 | m = rxq->rxq_head; |
7415 | |
7416 | WM_RXCHAIN_RESET(rxq); |
7417 | |
7418 | DPRINTF(WM_DEBUG_RX, |
7419 | ("%s: RX: have entire packet, len -> %d\n" , |
7420 | device_xname(sc->sc_dev), len)); |
7421 | |
7422 | /* If an error occurred, update stats and drop the packet. */ |
7423 | if (errors & |
7424 | (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { |
7425 | if (errors & WRX_ER_SE) |
7426 | log(LOG_WARNING, "%s: symbol error\n" , |
7427 | device_xname(sc->sc_dev)); |
7428 | else if (errors & WRX_ER_SEQ) |
7429 | log(LOG_WARNING, "%s: receive sequence error\n" , |
7430 | device_xname(sc->sc_dev)); |
7431 | else if (errors & WRX_ER_CE) |
7432 | log(LOG_WARNING, "%s: CRC error\n" , |
7433 | device_xname(sc->sc_dev)); |
7434 | m_freem(m); |
7435 | continue; |
7436 | } |
7437 | |
7438 | /* No errors. Receive the packet. */ |
7439 | m_set_rcvif(m, ifp); |
7440 | m->m_pkthdr.len = len; |
7441 | |
7442 | /* |
7443 | * If VLANs are enabled, VLAN packets have been unwrapped |
7444 | * for us. Associate the tag with the packet. |
7445 | */ |
7446 | /* XXXX should check for i350 and i354 */ |
7447 | if ((status & WRX_ST_VP) != 0) { |
7448 | VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue); |
7449 | } |
7450 | |
7451 | /* Set up checksum info for this packet. */ |
7452 | if ((status & WRX_ST_IXSM) == 0) { |
7453 | if (status & WRX_ST_IPCS) { |
7454 | WM_Q_EVCNT_INCR(rxq, rxipsum); |
7455 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; |
7456 | if (errors & WRX_ER_IPE) |
7457 | m->m_pkthdr.csum_flags |= |
7458 | M_CSUM_IPv4_BAD; |
7459 | } |
7460 | if (status & WRX_ST_TCPCS) { |
7461 | /* |
7462 | * Note: we don't know if this was TCP or UDP, |
7463 | * so we just set both bits, and expect the |
7464 | * upper layers to deal. |
7465 | */ |
7466 | WM_Q_EVCNT_INCR(rxq, rxtusum); |
7467 | m->m_pkthdr.csum_flags |= |
7468 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | |
7469 | M_CSUM_TCPv6 | M_CSUM_UDPv6; |
7470 | if (errors & WRX_ER_TCPE) |
7471 | m->m_pkthdr.csum_flags |= |
7472 | M_CSUM_TCP_UDP_BAD; |
7473 | } |
7474 | } |
7475 | |
7476 | ifp->if_ipackets++; |
7477 | |
7478 | mutex_exit(rxq->rxq_lock); |
7479 | |
7480 | /* Pass this up to any BPF listeners. */ |
7481 | bpf_mtap(ifp, m); |
7482 | |
7483 | /* Pass it on. */ |
7484 | if_percpuq_enqueue(sc->sc_ipq, m); |
7485 | |
7486 | mutex_enter(rxq->rxq_lock); |
7487 | |
7488 | if (rxq->rxq_stopping) |
7489 | break; |
7490 | } |
7491 | |
7492 | /* Update the receive pointer. */ |
7493 | rxq->rxq_ptr = i; |
7494 | if (count != 0) |
7495 | rnd_add_uint32(&sc->rnd_source, count); |
7496 | |
7497 | DPRINTF(WM_DEBUG_RX, |
7498 | ("%s: RX: rxptr -> %d\n" , device_xname(sc->sc_dev), i)); |
7499 | } |
7500 | |
7501 | /* |
7502 | * wm_linkintr_gmii: |
7503 | * |
7504 | * Helper; handle link interrupts for GMII. |
7505 | */ |
7506 | static void |
7507 | wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) |
7508 | { |
7509 | |
7510 | KASSERT(WM_CORE_LOCKED(sc)); |
7511 | |
7512 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n" , device_xname(sc->sc_dev), |
7513 | __func__)); |
7514 | |
7515 | if (icr & ICR_LSC) { |
7516 | uint32_t reg; |
7517 | uint32_t status = CSR_READ(sc, WMREG_STATUS); |
7518 | |
7519 | if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0)) |
7520 | wm_gig_downshift_workaround_ich8lan(sc); |
7521 | |
7522 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n" , |
7523 | device_xname(sc->sc_dev))); |
7524 | mii_pollstat(&sc->sc_mii); |
7525 | if (sc->sc_type == WM_T_82543) { |
7526 | int miistatus, active; |
7527 | |
7528 | /* |
7529 | * With 82543, we need to force speed and |
7530 | * duplex on the MAC equal to what the PHY |
7531 | * speed and duplex configuration is. |
7532 | */ |
7533 | miistatus = sc->sc_mii.mii_media_status; |
7534 | |
7535 | if (miistatus & IFM_ACTIVE) { |
7536 | active = sc->sc_mii.mii_media_active; |
7537 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); |
7538 | switch (IFM_SUBTYPE(active)) { |
7539 | case IFM_10_T: |
7540 | sc->sc_ctrl |= CTRL_SPEED_10; |
7541 | break; |
7542 | case IFM_100_TX: |
7543 | sc->sc_ctrl |= CTRL_SPEED_100; |
7544 | break; |
7545 | case IFM_1000_T: |
7546 | sc->sc_ctrl |= CTRL_SPEED_1000; |
7547 | break; |
7548 | default: |
7549 | /* |
7550 | * Fiber? |
7551 | * Should not get here. |
7552 | */ |
7553 | printf("unknown media (%x)\n" , active); |
7554 | break; |
7555 | } |
7556 | if (active & IFM_FDX) |
7557 | sc->sc_ctrl |= CTRL_FD; |
7558 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7559 | } |
7560 | } else if ((sc->sc_type == WM_T_ICH8) |
7561 | && (sc->sc_phytype == WMPHY_IGP_3)) { |
7562 | wm_kmrn_lock_loss_workaround_ich8lan(sc); |
7563 | } else if (sc->sc_type == WM_T_PCH) { |
7564 | wm_k1_gig_workaround_hv(sc, |
7565 | ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); |
7566 | } |
7567 | |
7568 | if ((sc->sc_phytype == WMPHY_82578) |
7569 | && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) |
7570 | == IFM_1000_T)) { |
7571 | |
7572 | if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { |
7573 | delay(200*1000); /* XXX too big */ |
7574 | |
7575 | /* Link stall fix for link up */ |
7576 | wm_gmii_hv_writereg(sc->sc_dev, 1, |
7577 | HV_MUX_DATA_CTRL, |
7578 | HV_MUX_DATA_CTRL_GEN_TO_MAC |
7579 | | HV_MUX_DATA_CTRL_FORCE_SPEED); |
7580 | wm_gmii_hv_writereg(sc->sc_dev, 1, |
7581 | HV_MUX_DATA_CTRL, |
7582 | HV_MUX_DATA_CTRL_GEN_TO_MAC); |
7583 | } |
7584 | } |
7585 | /* |
7586 | * I217 Packet Loss issue: |
7587 | * ensure that FEXTNVM4 Beacon Duration is set correctly |
7588 | * on power up. |
7589 | * Set the Beacon Duration for I217 to 8 usec |
7590 | */ |
7591 | if ((sc->sc_type == WM_T_PCH_LPT) |
7592 | || (sc->sc_type == WM_T_PCH_SPT)) { |
7593 | reg = CSR_READ(sc, WMREG_FEXTNVM4); |
7594 | reg &= ~FEXTNVM4_BEACON_DURATION; |
7595 | reg |= FEXTNVM4_BEACON_DURATION_8US; |
7596 | CSR_WRITE(sc, WMREG_FEXTNVM4, reg); |
7597 | } |
7598 | |
7599 | /* XXX Work-around I218 hang issue */ |
7600 | /* e1000_k1_workaround_lpt_lp() */ |
7601 | |
7602 | if ((sc->sc_type == WM_T_PCH_LPT) |
7603 | || (sc->sc_type == WM_T_PCH_SPT)) { |
7604 | /* |
7605 | * Set platform power management values for Latency |
7606 | * Tolerance Reporting (LTR) |
7607 | */ |
7608 | wm_platform_pm_pch_lpt(sc, |
7609 | ((sc->sc_mii.mii_media_status & IFM_ACTIVE) |
7610 | != 0)); |
7611 | } |
7612 | |
7613 | /* FEXTNVM6 K1-off workaround */ |
7614 | if (sc->sc_type == WM_T_PCH_SPT) { |
7615 | reg = CSR_READ(sc, WMREG_FEXTNVM6); |
7616 | if (CSR_READ(sc, WMREG_PCIEANACFG) |
7617 | & FEXTNVM6_K1_OFF_ENABLE) |
7618 | reg |= FEXTNVM6_K1_OFF_ENABLE; |
7619 | else |
7620 | reg &= ~FEXTNVM6_K1_OFF_ENABLE; |
7621 | CSR_WRITE(sc, WMREG_FEXTNVM6, reg); |
7622 | } |
7623 | } else if (icr & ICR_RXSEQ) { |
7624 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n" , |
7625 | device_xname(sc->sc_dev))); |
7626 | } |
7627 | } |
7628 | |
7629 | /* |
7630 | * wm_linkintr_tbi: |
7631 | * |
7632 | * Helper; handle link interrupts for TBI mode. |
7633 | */ |
7634 | static void |
7635 | wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) |
7636 | { |
7637 | uint32_t status; |
7638 | |
7639 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n" , device_xname(sc->sc_dev), |
7640 | __func__)); |
7641 | |
7642 | status = CSR_READ(sc, WMREG_STATUS); |
7643 | if (icr & ICR_LSC) { |
7644 | if (status & STATUS_LU) { |
7645 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n" , |
7646 | device_xname(sc->sc_dev), |
7647 | (status & STATUS_FD) ? "FDX" : "HDX" )); |
7648 | /* |
7649 | * NOTE: CTRL will update TFCE and RFCE automatically, |
7650 | * so we should update sc->sc_ctrl |
7651 | */ |
7652 | |
7653 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
7654 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); |
7655 | sc->sc_fcrtl &= ~FCRTL_XONE; |
7656 | if (status & STATUS_FD) |
7657 | sc->sc_tctl |= |
7658 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
7659 | else |
7660 | sc->sc_tctl |= |
7661 | TCTL_COLD(TX_COLLISION_DISTANCE_HDX); |
7662 | if (sc->sc_ctrl & CTRL_TFCE) |
7663 | sc->sc_fcrtl |= FCRTL_XONE; |
7664 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
7665 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? |
7666 | WMREG_OLD_FCRTL : WMREG_FCRTL, |
7667 | sc->sc_fcrtl); |
7668 | sc->sc_tbi_linkup = 1; |
7669 | } else { |
7670 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n" , |
7671 | device_xname(sc->sc_dev))); |
7672 | sc->sc_tbi_linkup = 0; |
7673 | } |
7674 | /* Update LED */ |
7675 | wm_tbi_serdes_set_linkled(sc); |
7676 | } else if (icr & ICR_RXSEQ) { |
7677 | DPRINTF(WM_DEBUG_LINK, |
7678 | ("%s: LINK: Receive sequence error\n" , |
7679 | device_xname(sc->sc_dev))); |
7680 | } |
7681 | } |
7682 | |
7683 | /* |
7684 | * wm_linkintr_serdes: |
7685 | * |
7686 | * Helper; handle link interrupts for SERDES mode. |
7687 | */ |
7688 | static void |
7689 | wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr) |
7690 | { |
7691 | struct mii_data *mii = &sc->sc_mii; |
7692 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; |
7693 | uint32_t pcs_adv, pcs_lpab, reg; |
7694 | |
7695 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n" , device_xname(sc->sc_dev), |
7696 | __func__)); |
7697 | |
7698 | if (icr & ICR_LSC) { |
7699 | /* Check PCS */ |
7700 | reg = CSR_READ(sc, WMREG_PCS_LSTS); |
7701 | if ((reg & PCS_LSTS_LINKOK) != 0) { |
7702 | mii->mii_media_status |= IFM_ACTIVE; |
7703 | sc->sc_tbi_linkup = 1; |
7704 | } else { |
7705 | mii->mii_media_status |= IFM_NONE; |
7706 | sc->sc_tbi_linkup = 0; |
7707 | wm_tbi_serdes_set_linkled(sc); |
7708 | return; |
7709 | } |
7710 | mii->mii_media_active |= IFM_1000_SX; |
7711 | if ((reg & PCS_LSTS_FDX) != 0) |
7712 | mii->mii_media_active |= IFM_FDX; |
7713 | else |
7714 | mii->mii_media_active |= IFM_HDX; |
7715 | if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { |
7716 | /* Check flow */ |
7717 | reg = CSR_READ(sc, WMREG_PCS_LSTS); |
7718 | if ((reg & PCS_LSTS_AN_COMP) == 0) { |
7719 | DPRINTF(WM_DEBUG_LINK, |
7720 | ("XXX LINKOK but not ACOMP\n" )); |
7721 | return; |
7722 | } |
7723 | pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV); |
7724 | pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB); |
7725 | DPRINTF(WM_DEBUG_LINK, |
7726 | ("XXX AN result %08x, %08x\n" , pcs_adv, pcs_lpab)); |
7727 | if ((pcs_adv & TXCW_SYM_PAUSE) |
7728 | && (pcs_lpab & TXCW_SYM_PAUSE)) { |
7729 | mii->mii_media_active |= IFM_FLOW |
7730 | | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
7731 | } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0) |
7732 | && (pcs_adv & TXCW_ASYM_PAUSE) |
7733 | && (pcs_lpab & TXCW_SYM_PAUSE) |
7734 | && (pcs_lpab & TXCW_ASYM_PAUSE)) |
7735 | mii->mii_media_active |= IFM_FLOW |
7736 | | IFM_ETH_TXPAUSE; |
7737 | else if ((pcs_adv & TXCW_SYM_PAUSE) |
7738 | && (pcs_adv & TXCW_ASYM_PAUSE) |
7739 | && ((pcs_lpab & TXCW_SYM_PAUSE) == 0) |
7740 | && (pcs_lpab & TXCW_ASYM_PAUSE)) |
7741 | mii->mii_media_active |= IFM_FLOW |
7742 | | IFM_ETH_RXPAUSE; |
7743 | } |
7744 | /* Update LED */ |
7745 | wm_tbi_serdes_set_linkled(sc); |
7746 | } else { |
7747 | DPRINTF(WM_DEBUG_LINK, |
7748 | ("%s: LINK: Receive sequence error\n" , |
7749 | device_xname(sc->sc_dev))); |
7750 | } |
7751 | } |
7752 | |
7753 | /* |
7754 | * wm_linkintr: |
7755 | * |
7756 | * Helper; handle link interrupts. |
7757 | */ |
7758 | static void |
7759 | wm_linkintr(struct wm_softc *sc, uint32_t icr) |
7760 | { |
7761 | |
7762 | KASSERT(WM_CORE_LOCKED(sc)); |
7763 | |
7764 | if (sc->sc_flags & WM_F_HAS_MII) |
7765 | wm_linkintr_gmii(sc, icr); |
7766 | else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) |
7767 | && (sc->sc_type >= WM_T_82575)) |
7768 | wm_linkintr_serdes(sc, icr); |
7769 | else |
7770 | wm_linkintr_tbi(sc, icr); |
7771 | } |
7772 | |
7773 | /* |
7774 | * wm_intr_legacy: |
7775 | * |
7776 | * Interrupt service routine for INTx and MSI. |
7777 | */ |
7778 | static int |
7779 | wm_intr_legacy(void *arg) |
7780 | { |
7781 | struct wm_softc *sc = arg; |
7782 | struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; |
7783 | struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq; |
7784 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
7785 | uint32_t icr, rndval = 0; |
7786 | int handled = 0; |
7787 | |
7788 | DPRINTF(WM_DEBUG_TX, |
7789 | ("%s: INTx: got intr\n" , device_xname(sc->sc_dev))); |
7790 | while (1 /* CONSTCOND */) { |
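/*
 * Reading ICR acknowledges (clears) the asserted interrupt
 * causes, so keep looping until no enabled cause remains.
 */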
7791 | icr = CSR_READ(sc, WMREG_ICR); |
7792 | if ((icr & sc->sc_icr) == 0) |
7793 | break; |
7794 | if (rndval == 0) |
7795 | rndval = icr; |
7796 | |
7797 | mutex_enter(rxq->rxq_lock); |
7798 | |
7799 | if (rxq->rxq_stopping) { |
7800 | mutex_exit(rxq->rxq_lock); |
7801 | break; |
7802 | } |
7803 | |
7804 | handled = 1; |
7805 | |
7806 | #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) |
7807 | if (icr & (ICR_RXDMT0 | ICR_RXT0)) { |
7808 | DPRINTF(WM_DEBUG_RX, |
7809 | ("%s: RX: got Rx intr 0x%08x\n" , |
7810 | device_xname(sc->sc_dev), |
7811 | icr & (ICR_RXDMT0 | ICR_RXT0))); |
7812 | WM_Q_EVCNT_INCR(rxq, rxintr); |
7813 | } |
7814 | #endif |
7815 | wm_rxeof(rxq); |
7816 | |
7817 | mutex_exit(rxq->rxq_lock); |
7818 | mutex_enter(txq->txq_lock); |
7819 | |
7820 | if (txq->txq_stopping) { |
7821 | mutex_exit(txq->txq_lock); |
7822 | break; |
7823 | } |
7824 | |
7825 | #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) |
7826 | if (icr & ICR_TXDW) { |
7827 | DPRINTF(WM_DEBUG_TX, |
7828 | ("%s: TX: got TXDW interrupt\n" , |
7829 | device_xname(sc->sc_dev))); |
7830 | WM_Q_EVCNT_INCR(txq, txdw); |
7831 | } |
7832 | #endif |
7833 | wm_txeof(sc, txq); |
7834 | |
7835 | mutex_exit(txq->txq_lock); |
7836 | WM_CORE_LOCK(sc); |
7837 | |
7838 | if (sc->sc_core_stopping) { |
7839 | WM_CORE_UNLOCK(sc); |
7840 | break; |
7841 | } |
7842 | |
7843 | if (icr & (ICR_LSC | ICR_RXSEQ)) { |
7844 | WM_EVCNT_INCR(&sc->sc_ev_linkintr); |
7845 | wm_linkintr(sc, icr); |
7846 | } |
7847 | |
7848 | WM_CORE_UNLOCK(sc); |
7849 | |
7850 | if (icr & ICR_RXO) { |
7851 | #if defined(WM_DEBUG) |
7852 | log(LOG_WARNING, "%s: Receive overrun\n" , |
7853 | device_xname(sc->sc_dev)); |
7854 | #endif /* defined(WM_DEBUG) */ |
7855 | } |
7856 | } |
7857 | |
7858 | rnd_add_uint32(&sc->rnd_source, rndval); |
7859 | |
7860 | if (handled) { |
7861 | /* Try to get more packets going. */ |
7862 | ifp->if_start(ifp); |
7863 | } |
7864 | |
7865 | return handled; |
7866 | } |
7867 | |
7868 | static int |
7869 | wm_txrxintr_msix(void *arg) |
7870 | { |
7871 | struct wm_queue *wmq = arg; |
7872 | struct wm_txqueue *txq = &wmq->wmq_txq; |
7873 | struct wm_rxqueue *rxq = &wmq->wmq_rxq; |
7874 | struct wm_softc *sc = txq->txq_sc; |
7875 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
7876 | |
7877 | KASSERT(wmq->wmq_intr_idx == wmq->wmq_id); |
7878 | |
7879 | DPRINTF(WM_DEBUG_TX, |
7880 | ("%s: TX: got Tx intr\n" , device_xname(sc->sc_dev))); |
7881 | |
7882 | if (sc->sc_type == WM_T_82574) |
7883 | CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); |
7884 | else if (sc->sc_type == WM_T_82575) |
7885 | CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); |
7886 | else |
7887 | CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx); |
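/*
 * This queue's interrupt causes stay masked (IMC/EIMC) while we
 * service it; they are re-enabled via IMS/EIMS at the end, so
 * the handler is not re-entered for this queue in the meantime.
 */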
7888 | |
7889 | mutex_enter(txq->txq_lock); |
7890 | |
7891 | if (txq->txq_stopping) { |
7892 | mutex_exit(txq->txq_lock); |
7893 | return 0; |
7894 | } |
7895 | |
7896 | WM_Q_EVCNT_INCR(txq, txdw); |
7897 | wm_txeof(sc, txq); |
7898 | |
7899 | /* Try to get more packets going. */ |
7900 | if (pcq_peek(txq->txq_interq) != NULL) |
7901 | wm_nq_transmit_locked(ifp, txq); |
7902 | /* |
7903 | * Some upper-layer processing (e.g. ALTQ) still calls |
7904 | * ifp->if_start(), so drain if_snd on queue 0 as well. |
7905 | */ |
7906 | if (wmq->wmq_id == 0) { |
7907 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
7908 | wm_nq_start_locked(ifp); |
7909 | } |
7910 | |
7911 | mutex_exit(txq->txq_lock); |
7912 | |
7913 | DPRINTF(WM_DEBUG_RX, |
7914 | ("%s: RX: got Rx intr\n" , device_xname(sc->sc_dev))); |
7915 | mutex_enter(rxq->rxq_lock); |
7916 | |
7917 | if (rxq->rxq_stopping) { |
7918 | mutex_exit(rxq->rxq_lock); |
7919 | return 0; |
7920 | } |
7921 | |
7922 | WM_Q_EVCNT_INCR(rxq, rxintr); |
7923 | wm_rxeof(rxq); |
7924 | mutex_exit(rxq->rxq_lock); |
7925 | |
7926 | if (sc->sc_type == WM_T_82574) |
7927 | CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); |
7928 | else if (sc->sc_type == WM_T_82575) |
7929 | CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); |
7930 | else |
7931 | CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx); |
7932 | |
7933 | return 1; |
7934 | } |
7935 | |
7936 | /* |
7937 | * wm_linkintr_msix: |
7938 | * |
7939 | * Interrupt service routine for link status change for MSI-X. |
7940 | */ |
7941 | static int |
7942 | wm_linkintr_msix(void *arg) |
7943 | { |
7944 | struct wm_softc *sc = arg; |
7945 | uint32_t reg; |
7946 | |
7947 | DPRINTF(WM_DEBUG_LINK, |
7948 | ("%s: LINK: got link intr\n" , device_xname(sc->sc_dev))); |
7949 | |
7950 | reg = CSR_READ(sc, WMREG_ICR); |
7951 | WM_CORE_LOCK(sc); |
7952 | if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0)) |
7953 | goto out; |
7954 | |
7955 | WM_EVCNT_INCR(&sc->sc_ev_linkintr); |
7956 | wm_linkintr(sc, ICR_LSC); |
7957 | |
7958 | out: |
7959 | WM_CORE_UNLOCK(sc); |
7960 | |
7961 | if (sc->sc_type == WM_T_82574) |
7962 | CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); |
7963 | else if (sc->sc_type == WM_T_82575) |
7964 | CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); |
7965 | else |
7966 | CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); |
7967 | |
7968 | return 1; |
7969 | } |
7970 | |
7971 | /* |
7972 | * Media related. |
7973 | * GMII, SGMII, TBI (and SERDES) |
7974 | */ |
7975 | |
7976 | /* Common */ |
7977 | |
7978 | /* |
7979 | * wm_tbi_serdes_set_linkled: |
7980 | * |
7981 | * Update the link LED on TBI and SERDES devices. |
7982 | */ |
7983 | static void |
7984 | wm_tbi_serdes_set_linkled(struct wm_softc *sc) |
7985 | { |
7986 | |
7987 | if (sc->sc_tbi_linkup) |
7988 | sc->sc_ctrl |= CTRL_SWDPIN(0); |
7989 | else |
7990 | sc->sc_ctrl &= ~CTRL_SWDPIN(0); |
7991 | |
7992 | /* 82540 or newer devices are active low */ |
7993 | sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; |
7994 | |
7995 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7996 | } |
7997 | |
7998 | /* GMII related */ |
7999 | |
8000 | /* |
8001 | * wm_gmii_reset: |
8002 | * |
8003 | * Reset the PHY. |
8004 | */ |
8005 | static void |
8006 | wm_gmii_reset(struct wm_softc *sc) |
8007 | { |
8008 | uint32_t reg; |
8009 | int rv; |
8010 | |
8011 | DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n" , |
8012 | device_xname(sc->sc_dev), __func__)); |
8013 | |
8014 | rv = sc->phy.acquire(sc); |
8015 | if (rv != 0) { |
8016 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
8017 | __func__); |
8018 | return; |
8019 | } |
8020 | |
8021 | switch (sc->sc_type) { |
8022 | case WM_T_82542_2_0: |
8023 | case WM_T_82542_2_1: |
8024 | /* null */ |
8025 | break; |
8026 | case WM_T_82543: |
8027 | /* |
8028 | * With 82543, we need to force speed and duplex on the MAC |
8029 | * equal to what the PHY speed and duplex configuration is. |
8030 | * In addition, we need to perform a hardware reset on the PHY |
8031 | * to take it out of reset. |
8032 | */ |
8033 | sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; |
8034 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
8035 | |
8036 | /* The PHY reset pin is active-low. */ |
8037 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
8038 | reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | |
8039 | CTRL_EXT_SWDPIN(4)); |
8040 | reg |= CTRL_EXT_SWDPIO(4); |
8041 | |
8042 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
8043 | CSR_WRITE_FLUSH(sc); |
8044 | delay(10*1000); |
8045 | |
8046 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); |
8047 | CSR_WRITE_FLUSH(sc); |
8048 | delay(150); |
8049 | #if 0 |
8050 | sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); |
8051 | #endif |
8052 | delay(20*1000); /* XXX extra delay to get PHY ID? */ |
8053 | break; |
8054 | case WM_T_82544: /* reset 10000us */ |
8055 | case WM_T_82540: |
8056 | case WM_T_82545: |
8057 | case WM_T_82545_3: |
8058 | case WM_T_82546: |
8059 | case WM_T_82546_3: |
8060 | case WM_T_82541: |
8061 | case WM_T_82541_2: |
8062 | case WM_T_82547: |
8063 | case WM_T_82547_2: |
8064 | case WM_T_82571: /* reset 100us */ |
8065 | case WM_T_82572: |
8066 | case WM_T_82573: |
8067 | case WM_T_82574: |
8068 | case WM_T_82575: |
8069 | case WM_T_82576: |
8070 | case WM_T_82580: |
8071 | case WM_T_I350: |
8072 | case WM_T_I354: |
8073 | case WM_T_I210: |
8074 | case WM_T_I211: |
8075 | case WM_T_82583: |
8076 | case WM_T_80003: |
8077 | /* generic reset */ |
8078 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); |
8079 | CSR_WRITE_FLUSH(sc); |
8080 | delay(20000); |
8081 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
8082 | CSR_WRITE_FLUSH(sc); |
8083 | delay(20000); |
8084 | |
8085 | if ((sc->sc_type == WM_T_82541) |
8086 | || (sc->sc_type == WM_T_82541_2) |
8087 | || (sc->sc_type == WM_T_82547) |
8088 | || (sc->sc_type == WM_T_82547_2)) { |
8089 | /* Workarounds for IGP PHYs are done in igp_reset(). */ |
8090 | /* XXX add code to set LED after phy reset */ |
8091 | } |
8092 | break; |
8093 | case WM_T_ICH8: |
8094 | case WM_T_ICH9: |
8095 | case WM_T_ICH10: |
8096 | case WM_T_PCH: |
8097 | case WM_T_PCH2: |
8098 | case WM_T_PCH_LPT: |
8099 | case WM_T_PCH_SPT: |
8100 | /* generic reset */ |
8101 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); |
8102 | CSR_WRITE_FLUSH(sc); |
8103 | delay(100); |
8104 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
8105 | CSR_WRITE_FLUSH(sc); |
8106 | delay(150); |
8107 | break; |
8108 | default: |
8109 | panic("%s: %s: unknown type\n" , device_xname(sc->sc_dev), |
8110 | __func__); |
8111 | break; |
8112 | } |
8113 | |
8114 | sc->phy.release(sc); |
8115 | |
8116 | /* get_cfg_done */ |
8117 | wm_get_cfg_done(sc); |
8118 | |
8119 | /* extra setup */ |
8120 | switch (sc->sc_type) { |
8121 | case WM_T_82542_2_0: |
8122 | case WM_T_82542_2_1: |
8123 | case WM_T_82543: |
8124 | case WM_T_82544: |
8125 | case WM_T_82540: |
8126 | case WM_T_82545: |
8127 | case WM_T_82545_3: |
8128 | case WM_T_82546: |
8129 | case WM_T_82546_3: |
8130 | case WM_T_82541_2: |
8131 | case WM_T_82547_2: |
8132 | case WM_T_82571: |
8133 | case WM_T_82572: |
8134 | case WM_T_82573: |
8135 | case WM_T_82575: |
8136 | case WM_T_82576: |
8137 | case WM_T_82580: |
8138 | case WM_T_I350: |
8139 | case WM_T_I354: |
8140 | case WM_T_I210: |
8141 | case WM_T_I211: |
8142 | case WM_T_80003: |
8143 | /* null */ |
8144 | break; |
8145 | case WM_T_82574: |
8146 | case WM_T_82583: |
8147 | wm_lplu_d0_disable(sc); |
8148 | break; |
8149 | case WM_T_82541: |
8150 | case WM_T_82547: |
8151 | /* XXX Actively configure the LED after PHY reset */ |
8152 | break; |
8153 | case WM_T_ICH8: |
8154 | case WM_T_ICH9: |
8155 | case WM_T_ICH10: |
8156 | case WM_T_PCH: |
8157 | case WM_T_PCH2: |
8158 | case WM_T_PCH_LPT: |
8159 | case WM_T_PCH_SPT: |
8160 | /* Allow time for h/w to get to a quiescent state after reset */ |
8161 | delay(10*1000); |
8162 | |
8163 | if (sc->sc_type == WM_T_PCH) |
8164 | wm_hv_phy_workaround_ich8lan(sc); |
8165 | |
8166 | if (sc->sc_type == WM_T_PCH2) |
8167 | wm_lv_phy_workaround_ich8lan(sc); |
8168 | |
8169 | /* Clear the host wakeup bit after lcd reset */ |
8170 | if (sc->sc_type >= WM_T_PCH) { |
8171 | reg = wm_gmii_hv_readreg(sc->sc_dev, 2, |
8172 | BM_PORT_GEN_CFG); |
8173 | reg &= ~BM_WUC_HOST_WU_BIT; |
8174 | wm_gmii_hv_writereg(sc->sc_dev, 2, |
8175 | BM_PORT_GEN_CFG, reg); |
8176 | } |
8177 | |
8178 | /* |
8179 | * XXX Configure the LCD with the extended configuration region |
8180 | * in NVM |
8181 | */ |
8182 | |
8183 | /* Disable D0 LPLU. */ |
8184 | if (sc->sc_type >= WM_T_PCH) /* PCH* */ |
8185 | wm_lplu_d0_disable_pch(sc); |
8186 | else |
8187 | wm_lplu_d0_disable(sc); /* ICH* */ |
8188 | break; |
8189 | default: |
8190 | panic("%s: unknown type\n" , __func__); |
8191 | break; |
8192 | } |
8193 | } |
8194 | |
8195 | /* |
8196 | * wm_get_phy_id_82575: |
8197 | * |
8198 | * Return PHY ID. Return -1 if it failed. |
8199 | */ |
8200 | static int |
8201 | wm_get_phy_id_82575(struct wm_softc *sc) |
8202 | { |
8203 | uint32_t reg; |
8204 | int phyid = -1; |
8205 | |
8206 | /* XXX */ |
8207 | if ((sc->sc_flags & WM_F_SGMII) == 0) |
8208 | return -1; |
8209 | |
8210 | if (wm_sgmii_uses_mdio(sc)) { |
8211 | switch (sc->sc_type) { |
8212 | case WM_T_82575: |
8213 | case WM_T_82576: |
8214 | reg = CSR_READ(sc, WMREG_MDIC); |
8215 | phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; |
8216 | break; |
8217 | case WM_T_82580: |
8218 | case WM_T_I350: |
8219 | case WM_T_I354: |
8220 | case WM_T_I210: |
8221 | case WM_T_I211: |
8222 | reg = CSR_READ(sc, WMREG_MDICNFG); |
8223 | phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; |
8224 | break; |
8225 | default: |
8226 | return -1; |
8227 | } |
8228 | } |
8229 | |
8230 | return phyid; |
8231 | } |
8232 | |
8233 | |
8234 | /* |
8235 | * wm_gmii_mediainit: |
8236 | * |
8237 | * Initialize media for use on 1000BASE-T devices. |
8238 | */ |
8239 | static void |
8240 | wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) |
8241 | { |
8242 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
8243 | struct mii_data *mii = &sc->sc_mii; |
8244 | uint32_t reg; |
8245 | |
8246 | DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n" , |
8247 | device_xname(sc->sc_dev), __func__)); |
8248 | |
8249 | /* We have GMII. */ |
8250 | sc->sc_flags |= WM_F_HAS_MII; |
8251 | |
8252 | if (sc->sc_type == WM_T_80003) |
8253 | sc->sc_tipg = TIPG_1000T_80003_DFLT; |
8254 | else |
8255 | sc->sc_tipg = TIPG_1000T_DFLT; |
8256 | |
8257 | /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */ |
8258 | if ((sc->sc_type == WM_T_82580) |
8259 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) |
8260 | || (sc->sc_type == WM_T_I211)) { |
8261 | reg = CSR_READ(sc, WMREG_PHPM); |
8262 | reg &= ~PHPM_GO_LINK_D; |
8263 | CSR_WRITE(sc, WMREG_PHPM, reg); |
8264 | } |
8265 | |
8266 | /* |
8267 | * Let the chip set speed/duplex on its own based on |
8268 | * signals from the PHY. |
8269 | * XXXbouyer - I'm not sure this is right for the 80003, |
8270 | * the em driver only sets CTRL_SLU here - but it seems to work. |
8271 | */ |
8272 | sc->sc_ctrl |= CTRL_SLU; |
8273 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
8274 | |
8275 | /* Initialize our media structures and probe the GMII. */ |
8276 | mii->mii_ifp = ifp; |
8277 | |
	/*
	 * Determine the PHY access method.
	 *
	 * For SGMII, use the SGMII-specific method.
	 *
	 * For some devices, we can determine the PHY access method
	 * from sc_type.
	 *
	 * For ICH and PCH variants, the PHY access method is difficult
	 * to determine from sc_type alone, so use the PCI product ID
	 * for some devices.
	 * For the other ICH8 variants, try igp's method first; if no
	 * PHY is detected, fall back to bm's method.
	 */
8292 | switch (prodid) { |
8293 | case PCI_PRODUCT_INTEL_PCH_M_LM: |
8294 | case PCI_PRODUCT_INTEL_PCH_M_LC: |
8295 | /* 82577 */ |
8296 | sc->sc_phytype = WMPHY_82577; |
8297 | break; |
8298 | case PCI_PRODUCT_INTEL_PCH_D_DM: |
8299 | case PCI_PRODUCT_INTEL_PCH_D_DC: |
8300 | /* 82578 */ |
8301 | sc->sc_phytype = WMPHY_82578; |
8302 | break; |
8303 | case PCI_PRODUCT_INTEL_PCH2_LV_LM: |
8304 | case PCI_PRODUCT_INTEL_PCH2_LV_V: |
8305 | /* 82579 */ |
8306 | sc->sc_phytype = WMPHY_82579; |
8307 | break; |
8308 | case PCI_PRODUCT_INTEL_82801H_82567V_3: |
8309 | case PCI_PRODUCT_INTEL_82801I_BM: |
8310 | case PCI_PRODUCT_INTEL_82801J_R_BM_LM: |
8311 | case PCI_PRODUCT_INTEL_82801J_R_BM_LF: |
8312 | case PCI_PRODUCT_INTEL_82801J_D_BM_LM: |
8313 | case PCI_PRODUCT_INTEL_82801J_D_BM_LF: |
8314 | case PCI_PRODUCT_INTEL_82801J_R_BM_V: |
8315 | /* ICH8, 9, 10 with 82567 */ |
8316 | sc->sc_phytype = WMPHY_BM; |
8317 | mii->mii_readreg = wm_gmii_bm_readreg; |
8318 | mii->mii_writereg = wm_gmii_bm_writereg; |
8319 | break; |
8320 | default: |
8321 | if (((sc->sc_flags & WM_F_SGMII) != 0) |
8322 | && !wm_sgmii_uses_mdio(sc)){ |
8323 | /* SGMII */ |
8324 | mii->mii_readreg = wm_sgmii_readreg; |
8325 | mii->mii_writereg = wm_sgmii_writereg; |
8326 | } else if ((sc->sc_type == WM_T_82574) |
8327 | || (sc->sc_type == WM_T_82583)) { |
8328 | /* BM2 (phyaddr == 1) */ |
8329 | sc->sc_phytype = WMPHY_BM; |
8330 | mii->mii_readreg = wm_gmii_bm_readreg; |
8331 | mii->mii_writereg = wm_gmii_bm_writereg; |
8332 | } else if (sc->sc_type >= WM_T_ICH8) { |
8333 | /* non-82567 ICH8, 9 and 10 */ |
8334 | mii->mii_readreg = wm_gmii_i82544_readreg; |
8335 | mii->mii_writereg = wm_gmii_i82544_writereg; |
8336 | } else if (sc->sc_type >= WM_T_80003) { |
8337 | /* 80003 */ |
8338 | sc->sc_phytype = WMPHY_GG82563; |
8339 | mii->mii_readreg = wm_gmii_i80003_readreg; |
8340 | mii->mii_writereg = wm_gmii_i80003_writereg; |
8341 | } else if (sc->sc_type >= WM_T_I210) { |
8342 | /* I210 and I211 */ |
8343 | sc->sc_phytype = WMPHY_210; |
8344 | mii->mii_readreg = wm_gmii_gs40g_readreg; |
8345 | mii->mii_writereg = wm_gmii_gs40g_writereg; |
8346 | } else if (sc->sc_type >= WM_T_82580) { |
8347 | /* 82580, I350 and I354 */ |
8348 | sc->sc_phytype = WMPHY_82580; |
8349 | mii->mii_readreg = wm_gmii_82580_readreg; |
8350 | mii->mii_writereg = wm_gmii_82580_writereg; |
8351 | } else if (sc->sc_type >= WM_T_82544) { |
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8353 | mii->mii_readreg = wm_gmii_i82544_readreg; |
8354 | mii->mii_writereg = wm_gmii_i82544_writereg; |
8355 | } else { |
8356 | mii->mii_readreg = wm_gmii_i82543_readreg; |
8357 | mii->mii_writereg = wm_gmii_i82543_writereg; |
8358 | } |
8359 | break; |
8360 | } |
8361 | if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) { |
8362 | /* All PCH* use _hv_ */ |
8363 | mii->mii_readreg = wm_gmii_hv_readreg; |
8364 | mii->mii_writereg = wm_gmii_hv_writereg; |
8365 | } |
8366 | mii->mii_statchg = wm_gmii_statchg; |
8367 | |
8368 | /* get PHY control from SMBus to PCIe */ |
8369 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) |
8370 | || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) |
8371 | wm_smbustopci(sc); |
8372 | |
8373 | wm_gmii_reset(sc); |
8374 | |
8375 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
8376 | ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, |
8377 | wm_gmii_mediastatus); |
8378 | |
8379 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
8380 | || (sc->sc_type == WM_T_82580) |
8381 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
8382 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { |
8383 | if ((sc->sc_flags & WM_F_SGMII) == 0) { |
8384 | /* Attach only one port */ |
8385 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, |
8386 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8387 | } else { |
8388 | int i, id; |
8389 | uint32_t ctrl_ext; |
8390 | |
8391 | id = wm_get_phy_id_82575(sc); |
8392 | if (id != -1) { |
8393 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, |
8394 | id, MII_OFFSET_ANY, MIIF_DOPAUSE); |
8395 | } |
8396 | if ((id == -1) |
8397 | || (LIST_FIRST(&mii->mii_phys) == NULL)) { |
				/* Power on the SGMII PHY if it is disabled */
8399 | ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); |
8400 | CSR_WRITE(sc, WMREG_CTRL_EXT, |
8401 | ctrl_ext &~ CTRL_EXT_SWDPIN(3)); |
8402 | CSR_WRITE_FLUSH(sc); |
8403 | delay(300*1000); /* XXX too long */ |
8404 | |
				/* Try PHY addresses 1 through 7 */
8406 | for (i = 1; i < 8; i++) |
8407 | mii_attach(sc->sc_dev, &sc->sc_mii, |
8408 | 0xffffffff, i, MII_OFFSET_ANY, |
8409 | MIIF_DOPAUSE); |
8410 | |
8411 | /* restore previous sfp cage power state */ |
8412 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); |
8413 | } |
8414 | } |
8415 | } else { |
8416 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
8417 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8418 | } |
8419 | |
8420 | /* |
8421 | * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call |
8422 | * wm_set_mdio_slow_mode_hv() for a workaround and retry. |
8423 | */ |
8424 | if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) && |
8425 | (LIST_FIRST(&mii->mii_phys) == NULL)) { |
8426 | wm_set_mdio_slow_mode_hv(sc); |
8427 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
8428 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8429 | } |
8430 | |
8431 | /* |
8432 | * (For ICH8 variants) |
8433 | * If PHY detection failed, use BM's r/w function and retry. |
8434 | */ |
8435 | if (LIST_FIRST(&mii->mii_phys) == NULL) { |
8436 | /* if failed, retry with *_bm_* */ |
8437 | mii->mii_readreg = wm_gmii_bm_readreg; |
8438 | mii->mii_writereg = wm_gmii_bm_writereg; |
8439 | |
8440 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
8441 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8442 | } |
8443 | |
8444 | if (LIST_FIRST(&mii->mii_phys) == NULL) { |
		/* No PHY was found */
8446 | ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); |
8447 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); |
8448 | sc->sc_phytype = WMPHY_NONE; |
8449 | } else { |
8450 | /* |
8451 | * PHY Found! |
8452 | * Check PHY type. |
8453 | */ |
8454 | uint32_t model; |
8455 | struct mii_softc *child; |
8456 | |
8457 | child = LIST_FIRST(&mii->mii_phys); |
8458 | model = child->mii_mpd_model; |
8459 | if (model == MII_MODEL_yyINTEL_I82566) |
8460 | sc->sc_phytype = WMPHY_IGP_3; |
8461 | |
8462 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); |
8463 | } |
8464 | } |
8465 | |
8466 | /* |
8467 | * wm_gmii_mediachange: [ifmedia interface function] |
8468 | * |
8469 | * Set hardware to newly-selected media on a 1000BASE-T device. |
8470 | */ |
8471 | static int |
8472 | wm_gmii_mediachange(struct ifnet *ifp) |
8473 | { |
8474 | struct wm_softc *sc = ifp->if_softc; |
8475 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; |
8476 | int rc; |
8477 | |
8478 | DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n" , |
8479 | device_xname(sc->sc_dev), __func__)); |
8480 | if ((ifp->if_flags & IFF_UP) == 0) |
8481 | return 0; |
8482 | |
8483 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); |
8484 | sc->sc_ctrl |= CTRL_SLU; |
8485 | if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) |
8486 | || (sc->sc_type > WM_T_82543)) { |
8487 | sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); |
8488 | } else { |
8489 | sc->sc_ctrl &= ~CTRL_ASDE; |
8490 | sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; |
8491 | if (ife->ifm_media & IFM_FDX) |
8492 | sc->sc_ctrl |= CTRL_FD; |
8493 | switch (IFM_SUBTYPE(ife->ifm_media)) { |
8494 | case IFM_10_T: |
8495 | sc->sc_ctrl |= CTRL_SPEED_10; |
8496 | break; |
8497 | case IFM_100_TX: |
8498 | sc->sc_ctrl |= CTRL_SPEED_100; |
8499 | break; |
8500 | case IFM_1000_T: |
8501 | sc->sc_ctrl |= CTRL_SPEED_1000; |
8502 | break; |
8503 | default: |
8504 | panic("wm_gmii_mediachange: bad media 0x%x" , |
8505 | ife->ifm_media); |
8506 | } |
8507 | } |
8508 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
8509 | if (sc->sc_type <= WM_T_82543) |
8510 | wm_gmii_reset(sc); |
8511 | |
8512 | if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) |
8513 | return 0; |
8514 | return rc; |
8515 | } |
8516 | |
8517 | /* |
8518 | * wm_gmii_mediastatus: [ifmedia interface function] |
8519 | * |
8520 | * Get the current interface media status on a 1000BASE-T device. |
8521 | */ |
8522 | static void |
8523 | wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
8524 | { |
8525 | struct wm_softc *sc = ifp->if_softc; |
8526 | |
8527 | ether_mediastatus(ifp, ifmr); |
8528 | ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
8529 | | sc->sc_flowflags; |
8530 | } |
8531 | |
8532 | #define MDI_IO CTRL_SWDPIN(2) |
8533 | #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ |
8534 | #define MDI_CLK CTRL_SWDPIN(3) |
8535 | |
8536 | static void |
8537 | wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) |
8538 | { |
8539 | uint32_t i, v; |
8540 | |
8541 | v = CSR_READ(sc, WMREG_CTRL); |
8542 | v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); |
8543 | v |= MDI_DIR | CTRL_SWDPIO(3); |
8544 | |
8545 | for (i = 1 << (nbits - 1); i != 0; i >>= 1) { |
8546 | if (data & i) |
8547 | v |= MDI_IO; |
8548 | else |
8549 | v &= ~MDI_IO; |
8550 | CSR_WRITE(sc, WMREG_CTRL, v); |
8551 | CSR_WRITE_FLUSH(sc); |
8552 | delay(10); |
8553 | CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); |
8554 | CSR_WRITE_FLUSH(sc); |
8555 | delay(10); |
8556 | CSR_WRITE(sc, WMREG_CTRL, v); |
8557 | CSR_WRITE_FLUSH(sc); |
8558 | delay(10); |
8559 | } |
8560 | } |
8561 | |
8562 | static uint32_t |
8563 | wm_i82543_mii_recvbits(struct wm_softc *sc) |
8564 | { |
8565 | uint32_t v, i, data = 0; |
8566 | |
8567 | v = CSR_READ(sc, WMREG_CTRL); |
8568 | v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); |
8569 | v |= CTRL_SWDPIO(3); |
8570 | |
8571 | CSR_WRITE(sc, WMREG_CTRL, v); |
8572 | CSR_WRITE_FLUSH(sc); |
8573 | delay(10); |
8574 | CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); |
8575 | CSR_WRITE_FLUSH(sc); |
8576 | delay(10); |
8577 | CSR_WRITE(sc, WMREG_CTRL, v); |
8578 | CSR_WRITE_FLUSH(sc); |
8579 | delay(10); |
8580 | |
8581 | for (i = 0; i < 16; i++) { |
8582 | data <<= 1; |
8583 | CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); |
8584 | CSR_WRITE_FLUSH(sc); |
8585 | delay(10); |
8586 | if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) |
8587 | data |= 1; |
8588 | CSR_WRITE(sc, WMREG_CTRL, v); |
8589 | CSR_WRITE_FLUSH(sc); |
8590 | delay(10); |
8591 | } |
8592 | |
8593 | CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); |
8594 | CSR_WRITE_FLUSH(sc); |
8595 | delay(10); |
8596 | CSR_WRITE(sc, WMREG_CTRL, v); |
8597 | CSR_WRITE_FLUSH(sc); |
8598 | delay(10); |
8599 | |
8600 | return data; |
8601 | } |
8602 | |
8603 | #undef MDI_IO |
8604 | #undef MDI_DIR |
8605 | #undef MDI_CLK |
8606 | |
8607 | /* |
8608 | * wm_gmii_i82543_readreg: [mii interface function] |
8609 | * |
8610 | * Read a PHY register on the GMII (i82543 version). |
8611 | */ |
8612 | static int |
8613 | wm_gmii_i82543_readreg(device_t self, int phy, int reg) |
8614 | { |
8615 | struct wm_softc *sc = device_private(self); |
8616 | int rv; |
8617 | |
8618 | wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); |
8619 | wm_i82543_mii_sendbits(sc, reg | (phy << 5) | |
8620 | (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); |
8621 | rv = wm_i82543_mii_recvbits(sc) & 0xffff; |
8622 | |
8623 | DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n" , |
8624 | device_xname(sc->sc_dev), phy, reg, rv)); |
8625 | |
8626 | return rv; |
8627 | } |
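/*
 * For reference, the 14 bits shifted out above form a standard IEEE
 * 802.3 clause 22 read header (field layout recovered from the shifts
 * in wm_gmii_i82543_readreg() itself):
 *
 *	bits 13-12	MII_COMMAND_START (01)
 *	bits 11-10	MII_COMMAND_READ (10)
 *	bits 9-5	PHY address
 *	bits 4-0	register address
 *
 * The 32 one-bits sent first are the preamble that synchronizes the
 * PHY; the write path below sends a full 32-bit frame instead, with
 * MII_COMMAND_ACK as the turnaround bits and the data in the low 16.
 */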
8628 | |
8629 | /* |
8630 | * wm_gmii_i82543_writereg: [mii interface function] |
8631 | * |
8632 | * Write a PHY register on the GMII (i82543 version). |
8633 | */ |
8634 | static void |
8635 | wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) |
8636 | { |
8637 | struct wm_softc *sc = device_private(self); |
8638 | |
8639 | wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); |
8640 | wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | |
8641 | (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | |
8642 | (MII_COMMAND_START << 30), 32); |
8643 | } |
8644 | |
8645 | /* |
8646 | * wm_gmii_mdic_readreg: [mii interface function] |
8647 | * |
8648 | * Read a PHY register on the GMII. |
8649 | */ |
8650 | static int |
8651 | wm_gmii_mdic_readreg(device_t self, int phy, int reg) |
8652 | { |
8653 | struct wm_softc *sc = device_private(self); |
8654 | uint32_t mdic = 0; |
8655 | int i, rv; |
8656 | |
8657 | CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | |
8658 | MDIC_REGADD(reg)); |
8659 | |
8660 | for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { |
8661 | mdic = CSR_READ(sc, WMREG_MDIC); |
8662 | if (mdic & MDIC_READY) |
8663 | break; |
8664 | delay(50); |
8665 | } |
8666 | |
8667 | if ((mdic & MDIC_READY) == 0) { |
8668 | log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n" , |
8669 | device_xname(sc->sc_dev), phy, reg); |
8670 | rv = 0; |
8671 | } else if (mdic & MDIC_E) { |
8672 | #if 0 /* This is normal if no PHY is present. */ |
8673 | log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n" , |
8674 | device_xname(sc->sc_dev), phy, reg); |
8675 | #endif |
8676 | rv = 0; |
8677 | } else { |
8678 | rv = MDIC_DATA(mdic); |
8679 | if (rv == 0xffff) |
8680 | rv = 0; |
8681 | } |
8682 | |
8683 | return rv; |
8684 | } |
8685 | |
8686 | /* |
8687 | * wm_gmii_mdic_writereg: [mii interface function] |
8688 | * |
8689 | * Write a PHY register on the GMII. |
8690 | */ |
8691 | static void |
8692 | wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val) |
8693 | { |
8694 | struct wm_softc *sc = device_private(self); |
8695 | uint32_t mdic = 0; |
8696 | int i; |
8697 | |
8698 | CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | |
8699 | MDIC_REGADD(reg) | MDIC_DATA(val)); |
8700 | |
8701 | for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { |
8702 | mdic = CSR_READ(sc, WMREG_MDIC); |
8703 | if (mdic & MDIC_READY) |
8704 | break; |
8705 | delay(50); |
8706 | } |
8707 | |
8708 | if ((mdic & MDIC_READY) == 0) |
8709 | log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n" , |
8710 | device_xname(sc->sc_dev), phy, reg); |
8711 | else if (mdic & MDIC_E) |
8712 | log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n" , |
8713 | device_xname(sc->sc_dev), phy, reg); |
8714 | } |
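/*
 * The ready-bit polling in the two functions above is identical: at
 * most WM_GEN_POLL_TIMEOUT * 3 iterations with a 50us delay each, so
 * the wait is bounded.  A minimal sketch of a shared helper the pair
 * could use (hypothetical; not part of this driver):
 *
 *	static uint32_t
 *	wm_mdic_wait_ready(struct wm_softc *sc)
 *	{
 *		uint32_t mdic = 0;
 *		int i;
 *
 *		for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
 *			mdic = CSR_READ(sc, WMREG_MDIC);
 *			if (mdic & MDIC_READY)
 *				break;
 *			delay(50);
 *		}
 *		return mdic;	(caller checks MDIC_READY and MDIC_E)
 *	}
 */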
8715 | |
8716 | /* |
8717 | * wm_gmii_i82544_readreg: [mii interface function] |
8718 | * |
8719 | * Read a PHY register on the GMII. |
8720 | */ |
8721 | static int |
8722 | wm_gmii_i82544_readreg(device_t self, int phy, int reg) |
8723 | { |
8724 | struct wm_softc *sc = device_private(self); |
8725 | int rv; |
8726 | |
8727 | if (sc->phy.acquire(sc)) { |
8728 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
8729 | __func__); |
8730 | return 0; |
8731 | } |
8732 | rv = wm_gmii_mdic_readreg(self, phy, reg); |
8733 | sc->phy.release(sc); |
8734 | |
8735 | return rv; |
8736 | } |
8737 | |
8738 | /* |
8739 | * wm_gmii_i82544_writereg: [mii interface function] |
8740 | * |
8741 | * Write a PHY register on the GMII. |
8742 | */ |
8743 | static void |
8744 | wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) |
8745 | { |
8746 | struct wm_softc *sc = device_private(self); |
8747 | |
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
8752 | wm_gmii_mdic_writereg(self, phy, reg, val); |
8753 | sc->phy.release(sc); |
8754 | } |
8755 | |
/*
 * wm_gmii_i80003_readreg: [mii interface function]
 *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
8763 | static int |
8764 | wm_gmii_i80003_readreg(device_t self, int phy, int reg) |
8765 | { |
8766 | struct wm_softc *sc = device_private(self); |
8767 | int rv; |
8768 | |
8769 | if (phy != 1) /* only one PHY on kumeran bus */ |
8770 | return 0; |
8771 | |
8772 | if (sc->phy.acquire(sc)) { |
8773 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
8774 | __func__); |
8775 | return 0; |
8776 | } |
8777 | |
8778 | if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) { |
8779 | wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT, |
8780 | reg >> GG82563_PAGE_SHIFT); |
8781 | } else { |
8782 | wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, |
8783 | reg >> GG82563_PAGE_SHIFT); |
8784 | } |
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8786 | delay(200); |
8787 | rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK); |
8788 | delay(200); |
8789 | sc->phy.release(sc); |
8790 | |
8791 | return rv; |
8792 | } |
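/*
 * In the GG82563 scheme above, the page number travels in the upper
 * bits of 'reg' and is peeled off with GG82563_PAGE_SHIFT, while
 * 'reg & MII_ADDRMASK' is the offset within the page.  For example
 * (register numbers illustrative only), offset 0x1a on page 2 would
 * be requested as:
 *
 *	val = wm_gmii_i80003_readreg(self, 1,
 *	    (2 << GG82563_PAGE_SHIFT) | 0x1a);
 */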
8793 | |
/*
 * wm_gmii_i80003_writereg: [mii interface function]
 *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
8801 | static void |
8802 | wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val) |
8803 | { |
8804 | struct wm_softc *sc = device_private(self); |
8805 | |
8806 | if (phy != 1) /* only one PHY on kumeran bus */ |
8807 | return; |
8808 | |
8809 | if (sc->phy.acquire(sc)) { |
8810 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
8811 | __func__); |
8812 | return; |
8813 | } |
8814 | |
8815 | if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) { |
8816 | wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT, |
8817 | reg >> GG82563_PAGE_SHIFT); |
8818 | } else { |
8819 | wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, |
8820 | reg >> GG82563_PAGE_SHIFT); |
8821 | } |
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8823 | delay(200); |
8824 | wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val); |
8825 | delay(200); |
8826 | |
8827 | sc->phy.release(sc); |
8828 | } |
8829 | |
/*
 * wm_gmii_bm_readreg: [mii interface function]
 *
 *	Read a PHY register on the BM PHY (82574/82583 and ICH8/9/10
 *	with an 82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
8837 | static int |
8838 | wm_gmii_bm_readreg(device_t self, int phy, int reg) |
8839 | { |
8840 | struct wm_softc *sc = device_private(self); |
8841 | uint16_t page = reg >> BME1000_PAGE_SHIFT; |
8842 | uint16_t val; |
8843 | int rv; |
8844 | |
8845 | if (sc->phy.acquire(sc)) { |
8846 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
8847 | __func__); |
8848 | return 0; |
8849 | } |
8850 | |
8851 | if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) |
8852 | phy = ((page >= 768) || ((page == 0) && (reg == 25)) |
8853 | || (reg == 31)) ? 1 : phy; |
8854 | /* Page 800 works differently than the rest so it has its own func */ |
8855 | if (page == BM_WUC_PAGE) { |
8856 | wm_access_phy_wakeup_reg_bm(self, reg, &val, 1); |
8857 | rv = val; |
8858 | goto release; |
8859 | } |
8860 | |
8861 | if (reg > BME1000_MAX_MULTI_PAGE_REG) { |
8862 | if ((phy == 1) && (sc->sc_type != WM_T_82574) |
8863 | && (sc->sc_type != WM_T_82583)) |
8864 | wm_gmii_mdic_writereg(self, phy, |
8865 | MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); |
8866 | else |
8867 | wm_gmii_mdic_writereg(self, phy, |
8868 | BME1000_PHY_PAGE_SELECT, page); |
8869 | } |
8870 | |
8871 | rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK); |
8872 | |
8873 | release: |
8874 | sc->phy.release(sc); |
8875 | return rv; |
8876 | } |
8877 | |
/*
 * wm_gmii_bm_writereg: [mii interface function]
 *
 *	Write a PHY register on the BM PHY (82574/82583 and ICH8/9/10
 *	with an 82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
8885 | static void |
8886 | wm_gmii_bm_writereg(device_t self, int phy, int reg, int val) |
8887 | { |
8888 | struct wm_softc *sc = device_private(self); |
8889 | uint16_t page = reg >> BME1000_PAGE_SHIFT; |
8890 | |
8891 | if (sc->phy.acquire(sc)) { |
8892 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
8893 | __func__); |
8894 | return; |
8895 | } |
8896 | |
8897 | if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) |
8898 | phy = ((page >= 768) || ((page == 0) && (reg == 25)) |
8899 | || (reg == 31)) ? 1 : phy; |
8900 | /* Page 800 works differently than the rest so it has its own func */ |
8901 | if (page == BM_WUC_PAGE) { |
8902 | uint16_t tmp; |
8903 | |
8904 | tmp = val; |
8905 | wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0); |
8906 | goto release; |
8907 | } |
8908 | |
8909 | if (reg > BME1000_MAX_MULTI_PAGE_REG) { |
8910 | if ((phy == 1) && (sc->sc_type != WM_T_82574) |
8911 | && (sc->sc_type != WM_T_82583)) |
8912 | wm_gmii_mdic_writereg(self, phy, |
8913 | MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); |
8914 | else |
8915 | wm_gmii_mdic_writereg(self, phy, |
8916 | BME1000_PHY_PAGE_SELECT, page); |
8917 | } |
8918 | |
8919 | wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val); |
8920 | |
8921 | release: |
8922 | sc->phy.release(sc); |
8923 | } |
8924 | |
8925 | static void |
wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
8927 | { |
8928 | struct wm_softc *sc = device_private(self); |
8929 | uint16_t regnum = BM_PHY_REG_NUM(offset); |
8930 | uint16_t wuce, reg; |
8931 | |
8932 | DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n" , |
8933 | device_xname(sc->sc_dev), __func__)); |
8934 | /* XXX Gig must be disabled for MDIO accesses to page 800 */ |
8935 | if (sc->sc_type == WM_T_PCH) { |
		/* XXX the e1000 driver does nothing here... why? */
8937 | } |
8938 | |
8939 | /* |
8940 | * 1) Enable PHY wakeup register first. |
8941 | * See e1000_enable_phy_wakeup_reg_access_bm(). |
8942 | */ |
8943 | |
8944 | /* Set page 769 */ |
8945 | wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
8946 | BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); |
8947 | |
8948 | /* Read WUCE and save it */ |
8949 | wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG); |
8950 | |
8951 | reg = wuce | BM_WUC_ENABLE_BIT; |
8952 | reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); |
8953 | wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg); |
8954 | |
8955 | /* Select page 800 */ |
8956 | wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
8957 | BM_WUC_PAGE << BME1000_PAGE_SHIFT); |
8958 | |
8959 | /* |
8960 | * 2) Access PHY wakeup register. |
8961 | * See e1000_access_phy_wakeup_reg_bm. |
8962 | */ |
8963 | |
	/* Write the register offset within page 800 */
8965 | wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); |
8966 | |
8967 | if (rd) |
8968 | *val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE); |
8969 | else |
8970 | wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); |
8971 | |
8972 | /* |
8973 | * 3) Disable PHY wakeup register. |
8974 | * See e1000_disable_phy_wakeup_reg_access_bm(). |
8975 | */ |
8976 | /* Set page 769 */ |
8977 | wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
8978 | BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); |
8979 | |
8980 | wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce); |
8981 | } |
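/*
 * A single wakeup-register access above therefore costs eight MDIC
 * operations: select page 769, read and modify WUCE, select page 800,
 * write the register offset, the data read or write itself, select
 * page 769 again and restore WUCE.  Callers use it as in the
 * BM_WUC_PAGE branches elsewhere in this file:
 *
 *	uint16_t val;
 *
 *	wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);	(read)
 *	wm_access_phy_wakeup_reg_bm(self, reg, &val, 0);	(write)
 */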
8982 | |
/*
 * wm_gmii_hv_readreg: [mii interface function]
 *
 *	Read a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
8990 | static int |
8991 | wm_gmii_hv_readreg(device_t self, int phy, int reg) |
8992 | { |
8993 | struct wm_softc *sc = device_private(self); |
8994 | int rv; |
8995 | |
8996 | DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n" , |
8997 | device_xname(sc->sc_dev), __func__)); |
8998 | if (sc->phy.acquire(sc)) { |
8999 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9000 | __func__); |
9001 | return 0; |
9002 | } |
9003 | |
9004 | rv = wm_gmii_hv_readreg_locked(self, phy, reg); |
9005 | sc->phy.release(sc); |
9006 | return rv; |
9007 | } |
9008 | |
9009 | static int |
9010 | wm_gmii_hv_readreg_locked(device_t self, int phy, int reg) |
9011 | { |
9012 | uint16_t page = BM_PHY_REG_PAGE(reg); |
9013 | uint16_t regnum = BM_PHY_REG_NUM(reg); |
9014 | uint16_t val; |
9015 | int rv; |
9016 | |
9017 | phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy; |
9018 | |
9019 | /* Page 800 works differently than the rest so it has its own func */ |
9020 | if (page == BM_WUC_PAGE) { |
9021 | wm_access_phy_wakeup_reg_bm(self, reg, &val, 1); |
9022 | return val; |
9023 | } |
9024 | |
	/*
	 * Pages lower than 768 work differently from the rest, so they
	 * are not handled here yet.
	 */
9029 | if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { |
9030 | printf("gmii_hv_readreg!!!\n" ); |
9031 | return 0; |
9032 | } |
9033 | |
9034 | if (regnum > BME1000_MAX_MULTI_PAGE_REG) { |
9035 | wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
9036 | page << BME1000_PAGE_SHIFT); |
9037 | } |
9038 | |
9039 | rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK); |
9040 | return rv; |
9041 | } |
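/*
 * HV register numbers carry their page in the upper bits:
 * BM_PHY_REG_PAGE(reg) extracts the page, BM_PHY_REG_NUM(reg) the
 * in-page offset, and anything at or above HV_INTC_FC_PAGE_START is
 * rerouted to PHY address 1.  A sketch, assuming a BM_PHY_REG(page,
 * reg) constructor macro as in Intel's e1000 code (page and offset
 * values illustrative only):
 *
 *	reg = BM_PHY_REG(778, 16);
 *	val = wm_gmii_hv_readreg(self, 2, reg);	(phy remapped to 1)
 */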
9042 | |
/*
 * wm_gmii_hv_writereg: [mii interface function]
 *
 *	Write a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9050 | static void |
9051 | wm_gmii_hv_writereg(device_t self, int phy, int reg, int val) |
9052 | { |
9053 | struct wm_softc *sc = device_private(self); |
9054 | |
9055 | DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n" , |
9056 | device_xname(sc->sc_dev), __func__)); |
9057 | |
9058 | if (sc->phy.acquire(sc)) { |
9059 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9060 | __func__); |
9061 | return; |
9062 | } |
9063 | |
9064 | wm_gmii_hv_writereg_locked(self, phy, reg, val); |
9065 | sc->phy.release(sc); |
9066 | } |
9067 | |
9068 | static void |
9069 | wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val) |
9070 | { |
9071 | struct wm_softc *sc = device_private(self); |
9072 | uint16_t page = BM_PHY_REG_PAGE(reg); |
9073 | uint16_t regnum = BM_PHY_REG_NUM(reg); |
9074 | |
9075 | phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy; |
9076 | |
9077 | /* Page 800 works differently than the rest so it has its own func */ |
9078 | if (page == BM_WUC_PAGE) { |
9079 | uint16_t tmp; |
9080 | |
9081 | tmp = val; |
9082 | wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0); |
9083 | return; |
9084 | } |
9085 | |
	/*
	 * Pages lower than 768 work differently from the rest, so they
	 * are not handled here yet.
	 */
9090 | if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { |
9091 | printf("gmii_hv_writereg!!!\n" ); |
9092 | return; |
9093 | } |
9094 | |
9095 | { |
9096 | /* |
9097 | * XXX Workaround MDIO accesses being disabled after entering |
9098 | * IEEE Power Down (whenever bit 11 of the PHY control |
9099 | * register is set) |
9100 | */ |
9101 | if (sc->sc_phytype == WMPHY_82578) { |
9102 | struct mii_softc *child; |
9103 | |
9104 | child = LIST_FIRST(&sc->sc_mii.mii_phys); |
9105 | if ((child != NULL) && (child->mii_mpd_rev >= 1) |
9106 | && (phy == 2) && ((regnum & MII_ADDRMASK) == 0) |
9107 | && ((val & (1 << 11)) != 0)) { |
9108 | printf("XXX need workaround\n" ); |
9109 | } |
9110 | } |
9111 | |
9112 | if (regnum > BME1000_MAX_MULTI_PAGE_REG) { |
9113 | wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
9114 | page << BME1000_PAGE_SHIFT); |
9115 | } |
9116 | } |
9117 | |
9118 | wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val); |
9119 | } |
9120 | |
/*
 * wm_gmii_82580_readreg: [mii interface function]
 *
 *	Read a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9128 | static int |
9129 | wm_gmii_82580_readreg(device_t self, int phy, int reg) |
9130 | { |
9131 | struct wm_softc *sc = device_private(self); |
9132 | int rv; |
9133 | |
9134 | if (sc->phy.acquire(sc) != 0) { |
9135 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9136 | __func__); |
9137 | return 0; |
9138 | } |
9139 | |
9140 | rv = wm_gmii_mdic_readreg(self, phy, reg); |
9141 | |
9142 | sc->phy.release(sc); |
9143 | return rv; |
9144 | } |
9145 | |
/*
 * wm_gmii_82580_writereg: [mii interface function]
 *
 *	Write a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9153 | static void |
9154 | wm_gmii_82580_writereg(device_t self, int phy, int reg, int val) |
9155 | { |
9156 | struct wm_softc *sc = device_private(self); |
9157 | |
9158 | if (sc->phy.acquire(sc) != 0) { |
9159 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9160 | __func__); |
9161 | return; |
9162 | } |
9163 | |
9164 | wm_gmii_mdic_writereg(self, phy, reg, val); |
9165 | |
9166 | sc->phy.release(sc); |
9167 | } |
9168 | |
/*
 * wm_gmii_gs40g_readreg: [mii interface function]
 *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9176 | static int |
9177 | wm_gmii_gs40g_readreg(device_t self, int phy, int reg) |
9178 | { |
9179 | struct wm_softc *sc = device_private(self); |
9180 | int page, offset; |
9181 | int rv; |
9182 | |
9183 | /* Acquire semaphore */ |
9184 | if (sc->phy.acquire(sc)) { |
9185 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9186 | __func__); |
9187 | return 0; |
9188 | } |
9189 | |
9190 | /* Page select */ |
9191 | page = reg >> GS40G_PAGE_SHIFT; |
9192 | wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page); |
9193 | |
9194 | /* Read reg */ |
9195 | offset = reg & GS40G_OFFSET_MASK; |
9196 | rv = wm_gmii_mdic_readreg(self, phy, offset); |
9197 | |
9198 | sc->phy.release(sc); |
9199 | return rv; |
9200 | } |
9201 | |
/*
 * wm_gmii_gs40g_writereg: [mii interface function]
 *
 *	Write a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9209 | static void |
9210 | wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val) |
9211 | { |
9212 | struct wm_softc *sc = device_private(self); |
9213 | int page, offset; |
9214 | |
9215 | /* Acquire semaphore */ |
9216 | if (sc->phy.acquire(sc)) { |
9217 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9218 | __func__); |
9219 | return; |
9220 | } |
9221 | |
9222 | /* Page select */ |
9223 | page = reg >> GS40G_PAGE_SHIFT; |
9224 | wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page); |
9225 | |
9226 | /* Write reg */ |
9227 | offset = reg & GS40G_OFFSET_MASK; |
9228 | wm_gmii_mdic_writereg(self, phy, offset, val); |
9229 | |
9230 | /* Release semaphore */ |
9231 | sc->phy.release(sc); |
9232 | } |
9233 | |
9234 | /* |
9235 | * wm_gmii_statchg: [mii interface function] |
9236 | * |
9237 | * Callback from MII layer when media changes. |
9238 | */ |
9239 | static void |
9240 | wm_gmii_statchg(struct ifnet *ifp) |
9241 | { |
9242 | struct wm_softc *sc = ifp->if_softc; |
9243 | struct mii_data *mii = &sc->sc_mii; |
9244 | |
9245 | sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); |
9246 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); |
9247 | sc->sc_fcrtl &= ~FCRTL_XONE; |
9248 | |
9249 | /* |
9250 | * Get flow control negotiation result. |
9251 | */ |
9252 | if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && |
9253 | (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { |
9254 | sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; |
9255 | mii->mii_media_active &= ~IFM_ETH_FMASK; |
9256 | } |
9257 | |
9258 | if (sc->sc_flowflags & IFM_FLOW) { |
9259 | if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { |
9260 | sc->sc_ctrl |= CTRL_TFCE; |
9261 | sc->sc_fcrtl |= FCRTL_XONE; |
9262 | } |
9263 | if (sc->sc_flowflags & IFM_ETH_RXPAUSE) |
9264 | sc->sc_ctrl |= CTRL_RFCE; |
9265 | } |
9266 | |
9267 | if (sc->sc_mii.mii_media_active & IFM_FDX) { |
9268 | DPRINTF(WM_DEBUG_LINK, |
9269 | ("%s: LINK: statchg: FDX\n" , ifp->if_xname)); |
9270 | sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
9271 | } else { |
9272 | DPRINTF(WM_DEBUG_LINK, |
9273 | ("%s: LINK: statchg: HDX\n" , ifp->if_xname)); |
9274 | sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); |
9275 | } |
9276 | |
9277 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9278 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
9279 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL |
9280 | : WMREG_FCRTL, sc->sc_fcrtl); |
9281 | if (sc->sc_type == WM_T_80003) { |
9282 | switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { |
9283 | case IFM_1000_T: |
9284 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, |
9285 | KUMCTRLSTA_HD_CTRL_1000_DEFAULT); |
9286 | sc->sc_tipg = TIPG_1000T_80003_DFLT; |
9287 | break; |
9288 | default: |
9289 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, |
9290 | KUMCTRLSTA_HD_CTRL_10_100_DEFAULT); |
9291 | sc->sc_tipg = TIPG_10_100_80003_DFLT; |
9292 | break; |
9293 | } |
9294 | CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); |
9295 | } |
9296 | } |
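/*
 * Summary of the flow-control plumbing above (negotiated ifmedia flag
 * on the left, hardware bits written on the right):
 *
 *	IFM_ETH_TXPAUSE		CTRL_TFCE + FCRTL_XONE	(we send pause)
 *	IFM_ETH_RXPAUSE		CTRL_RFCE		(we honor pause)
 */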
9297 | |
/* Kumeran-related (80003, ICH* and PCH*) */
9299 | |
/*
 * wm_kmrn_readreg:
 *
 *	Read a Kumeran register.
 */
9305 | static int |
9306 | wm_kmrn_readreg(struct wm_softc *sc, int reg) |
9307 | { |
9308 | int rv; |
9309 | |
9310 | if (sc->sc_type == WM_T_80003) |
9311 | rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM); |
9312 | else |
9313 | rv = sc->phy.acquire(sc); |
9314 | if (rv != 0) { |
9315 | aprint_error_dev(sc->sc_dev, |
9316 | "%s: failed to get semaphore\n" , __func__); |
9317 | return 0; |
9318 | } |
9319 | |
9320 | rv = wm_kmrn_readreg_locked(sc, reg); |
9321 | |
9322 | if (sc->sc_type == WM_T_80003) |
9323 | wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); |
9324 | else |
9325 | sc->phy.release(sc); |
9326 | |
9327 | return rv; |
9328 | } |
9329 | |
9330 | static int |
9331 | wm_kmrn_readreg_locked(struct wm_softc *sc, int reg) |
9332 | { |
9333 | int rv; |
9334 | |
9335 | CSR_WRITE(sc, WMREG_KUMCTRLSTA, |
9336 | ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | |
9337 | KUMCTRLSTA_REN); |
9338 | CSR_WRITE_FLUSH(sc); |
9339 | delay(2); |
9340 | |
9341 | rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK; |
9342 | |
9343 | return rv; |
9344 | } |
9345 | |
/*
 * wm_kmrn_writereg:
 *
 *	Write a Kumeran register.
 */
9351 | static void |
9352 | wm_kmrn_writereg(struct wm_softc *sc, int reg, int val) |
9353 | { |
9354 | int rv; |
9355 | |
9356 | if (sc->sc_type == WM_T_80003) |
9357 | rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM); |
9358 | else |
9359 | rv = sc->phy.acquire(sc); |
9360 | if (rv != 0) { |
9361 | aprint_error_dev(sc->sc_dev, |
9362 | "%s: failed to get semaphore\n" , __func__); |
9363 | return; |
9364 | } |
9365 | |
9366 | wm_kmrn_writereg_locked(sc, reg, val); |
9367 | |
9368 | if (sc->sc_type == WM_T_80003) |
9369 | wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); |
9370 | else |
9371 | sc->phy.release(sc); |
9372 | } |
9373 | |
9374 | static void |
9375 | wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val) |
9376 | { |
9377 | |
9378 | CSR_WRITE(sc, WMREG_KUMCTRLSTA, |
9379 | ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | |
9380 | (val & KUMCTRLSTA_MASK)); |
9381 | } |
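/*
 * Every Kumeran access goes through the single KUMCTRLSTA register:
 * the register offset is placed in the KUMCTRLSTA_OFFSET field, a read
 * is requested with KUMCTRLSTA_REN, and data travels in the low
 * KUMCTRLSTA_MASK bits.  A typical caller looks like wm_gmii_statchg()
 * above (the read shown here is illustrative):
 *
 *	old = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
 *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 */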
9382 | |
9383 | /* SGMII related */ |
9384 | |
9385 | /* |
9386 | * wm_sgmii_uses_mdio |
9387 | * |
9388 | * Check whether the transaction is to the internal PHY or the external |
9389 | * MDIO interface. Return true if it's MDIO. |
9390 | */ |
9391 | static bool |
9392 | wm_sgmii_uses_mdio(struct wm_softc *sc) |
9393 | { |
9394 | uint32_t reg; |
9395 | bool ismdio = false; |
9396 | |
9397 | switch (sc->sc_type) { |
9398 | case WM_T_82575: |
9399 | case WM_T_82576: |
9400 | reg = CSR_READ(sc, WMREG_MDIC); |
9401 | ismdio = ((reg & MDIC_DEST) != 0); |
9402 | break; |
9403 | case WM_T_82580: |
9404 | case WM_T_I350: |
9405 | case WM_T_I354: |
9406 | case WM_T_I210: |
9407 | case WM_T_I211: |
9408 | reg = CSR_READ(sc, WMREG_MDICNFG); |
9409 | ismdio = ((reg & MDICNFG_DEST) != 0); |
9410 | break; |
9411 | default: |
9412 | break; |
9413 | } |
9414 | |
9415 | return ismdio; |
9416 | } |
9417 | |
/*
 * wm_sgmii_readreg: [mii interface function]
 *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9425 | static int |
9426 | wm_sgmii_readreg(device_t self, int phy, int reg) |
9427 | { |
9428 | struct wm_softc *sc = device_private(self); |
9429 | uint32_t i2ccmd; |
9430 | int i, rv; |
9431 | |
9432 | if (sc->phy.acquire(sc)) { |
9433 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9434 | __func__); |
9435 | return 0; |
9436 | } |
9437 | |
9438 | i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) |
9439 | | (phy << I2CCMD_PHY_ADDR_SHIFT) |
9440 | | I2CCMD_OPCODE_READ; |
9441 | CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); |
9442 | |
9443 | /* Poll the ready bit */ |
9444 | for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { |
9445 | delay(50); |
9446 | i2ccmd = CSR_READ(sc, WMREG_I2CCMD); |
9447 | if (i2ccmd & I2CCMD_READY) |
9448 | break; |
9449 | } |
9450 | if ((i2ccmd & I2CCMD_READY) == 0) |
9451 | aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n" ); |
9452 | if ((i2ccmd & I2CCMD_ERROR) != 0) |
9453 | aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n" ); |
9454 | |
9455 | rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00); |
9456 | |
9457 | sc->phy.release(sc); |
9458 | return rv; |
9459 | } |
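/*
 * The final swap above undoes I2C byte order: the PHY returns the
 * 16-bit register value most-significant byte first, so a register
 * containing 0x1234 arrives in the I2CCMD data field as 0x3412, and
 * ((x >> 8) & 0x00ff) | ((x << 8) & 0xff00) restores 0x1234.
 * wm_sgmii_writereg() below performs the mirror-image swap before
 * sending.
 */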
9460 | |
/*
 * wm_sgmii_writereg: [mii interface function]
 *
 *	Write a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
9468 | static void |
9469 | wm_sgmii_writereg(device_t self, int phy, int reg, int val) |
9470 | { |
9471 | struct wm_softc *sc = device_private(self); |
9472 | uint32_t i2ccmd; |
9473 | int i; |
9474 | int val_swapped; |
9475 | |
9476 | if (sc->phy.acquire(sc) != 0) { |
9477 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
9478 | __func__); |
9479 | return; |
9480 | } |
9481 | /* Swap the data bytes for the I2C interface */ |
9482 | val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00); |
9483 | i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) |
9484 | | (phy << I2CCMD_PHY_ADDR_SHIFT) |
9485 | | I2CCMD_OPCODE_WRITE | val_swapped; |
9486 | CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); |
9487 | |
9488 | /* Poll the ready bit */ |
9489 | for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { |
9490 | delay(50); |
9491 | i2ccmd = CSR_READ(sc, WMREG_I2CCMD); |
9492 | if (i2ccmd & I2CCMD_READY) |
9493 | break; |
9494 | } |
9495 | if ((i2ccmd & I2CCMD_READY) == 0) |
9496 | aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n" ); |
9497 | if ((i2ccmd & I2CCMD_ERROR) != 0) |
9498 | aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n" ); |
9499 | |
9500 | sc->phy.release(sc); |
9501 | } |
9502 | |
9503 | /* TBI related */ |
9504 | |
9505 | /* |
9506 | * wm_tbi_mediainit: |
9507 | * |
9508 | * Initialize media for use on 1000BASE-X devices. |
9509 | */ |
9510 | static void |
9511 | wm_tbi_mediainit(struct wm_softc *sc) |
9512 | { |
9513 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
9514 | const char *sep = "" ; |
9515 | |
9516 | if (sc->sc_type < WM_T_82543) |
9517 | sc->sc_tipg = TIPG_WM_DFLT; |
9518 | else |
9519 | sc->sc_tipg = TIPG_LG_DFLT; |
9520 | |
9521 | sc->sc_tbi_serdes_anegticks = 5; |
9522 | |
9523 | /* Initialize our media structures */ |
9524 | sc->sc_mii.mii_ifp = ifp; |
9525 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
9526 | |
9527 | if ((sc->sc_type >= WM_T_82575) |
9528 | && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) |
9529 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, |
9530 | wm_serdes_mediachange, wm_serdes_mediastatus); |
9531 | else |
9532 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, |
9533 | wm_tbi_mediachange, wm_tbi_mediastatus); |
9534 | |
9535 | /* |
9536 | * SWD Pins: |
9537 | * |
9538 | * 0 = Link LED (output) |
9539 | * 1 = Loss Of Signal (input) |
9540 | */ |
9541 | sc->sc_ctrl |= CTRL_SWDPIO(0); |
9542 | |
9543 | /* XXX Perhaps this is only for TBI */ |
9544 | if (sc->sc_mediatype != WM_MEDIATYPE_SERDES) |
9545 | sc->sc_ctrl &= ~CTRL_SWDPIO(1); |
9546 | |
9547 | if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) |
9548 | sc->sc_ctrl &= ~CTRL_LRST; |
9549 | |
9550 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9551 | |
9552 | #define ADD(ss, mm, dd) \ |
9553 | do { \ |
9554 | aprint_normal("%s%s", sep, ss); \ |
9555 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \ |
9556 | sep = ", "; \ |
9557 | } while (/*CONSTCOND*/0) |
9558 | |
9559 | aprint_normal_dev(sc->sc_dev, "" ); |
9560 | |
9561 | /* Only 82545 is LX */ |
9562 | if (sc->sc_type == WM_T_82545) { |
9563 | ADD("1000baseLX" , IFM_1000_LX, ANAR_X_HD); |
9564 | ADD("1000baseLX-FDX" , IFM_1000_LX | IFM_FDX, ANAR_X_FD); |
9565 | } else { |
9566 | ADD("1000baseSX" , IFM_1000_SX, ANAR_X_HD); |
9567 | ADD("1000baseSX-FDX" , IFM_1000_SX | IFM_FDX, ANAR_X_FD); |
9568 | } |
9569 | ADD("auto" , IFM_AUTO, ANAR_X_FD | ANAR_X_HD); |
9570 | aprint_normal("\n" ); |
9571 | |
9572 | #undef ADD |
9573 | |
9574 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); |
9575 | } |
9576 | |
9577 | /* |
9578 | * wm_tbi_mediachange: [ifmedia interface function] |
9579 | * |
9580 | * Set hardware to newly-selected media on a 1000BASE-X device. |
9581 | */ |
9582 | static int |
9583 | wm_tbi_mediachange(struct ifnet *ifp) |
9584 | { |
9585 | struct wm_softc *sc = ifp->if_softc; |
9586 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; |
9587 | uint32_t status; |
9588 | int i; |
9589 | |
9590 | if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) { |
9591 | /* XXX need some work for >= 82571 and < 82575 */ |
9592 | if (sc->sc_type < WM_T_82575) |
9593 | return 0; |
9594 | } |
9595 | |
9596 | if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572) |
9597 | || (sc->sc_type >= WM_T_82575)) |
9598 | CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK); |
9599 | |
9600 | sc->sc_ctrl &= ~CTRL_LRST; |
9601 | sc->sc_txcw = TXCW_ANE; |
9602 | if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) |
9603 | sc->sc_txcw |= TXCW_FD | TXCW_HD; |
9604 | else if (ife->ifm_media & IFM_FDX) |
9605 | sc->sc_txcw |= TXCW_FD; |
9606 | else |
9607 | sc->sc_txcw |= TXCW_HD; |
9608 | |
9609 | if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) |
9610 | sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE; |
9611 | |
9612 | DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n" , |
9613 | device_xname(sc->sc_dev), sc->sc_txcw)); |
9614 | CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); |
9615 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9616 | CSR_WRITE_FLUSH(sc); |
9617 | delay(1000); |
9618 | |
9619 | i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1); |
9620 | DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n" , device_xname(sc->sc_dev),i)); |
9621 | |
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, and clear if they don't.
	 */
9626 | if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) { |
9627 | /* Have signal; wait for the link to come up. */ |
9628 | for (i = 0; i < WM_LINKUP_TIMEOUT; i++) { |
9629 | delay(10000); |
9630 | if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) |
9631 | break; |
9632 | } |
9633 | |
9634 | DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n" , |
9635 | device_xname(sc->sc_dev),i)); |
9636 | |
9637 | status = CSR_READ(sc, WMREG_STATUS); |
9638 | DPRINTF(WM_DEBUG_LINK, |
9639 | ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n" , |
9640 | device_xname(sc->sc_dev),status, STATUS_LU)); |
9641 | if (status & STATUS_LU) { |
9642 | /* Link is up. */ |
9643 | DPRINTF(WM_DEBUG_LINK, |
9644 | ("%s: LINK: set media -> link up %s\n" , |
9645 | device_xname(sc->sc_dev), |
9646 | (status & STATUS_FD) ? "FDX" : "HDX" )); |
9647 | |
			/*
			 * NOTE: the hardware updates TFCE and RFCE inside
			 * CTRL automatically, so re-read it into sc->sc_ctrl.
			 */
9652 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
9653 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); |
9654 | sc->sc_fcrtl &= ~FCRTL_XONE; |
9655 | if (status & STATUS_FD) |
9656 | sc->sc_tctl |= |
9657 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
9658 | else |
9659 | sc->sc_tctl |= |
9660 | TCTL_COLD(TX_COLLISION_DISTANCE_HDX); |
9661 | if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) |
9662 | sc->sc_fcrtl |= FCRTL_XONE; |
9663 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
9664 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? |
9665 | WMREG_OLD_FCRTL : WMREG_FCRTL, |
9666 | sc->sc_fcrtl); |
9667 | sc->sc_tbi_linkup = 1; |
9668 | } else { |
9669 | if (i == WM_LINKUP_TIMEOUT) |
9670 | wm_check_for_link(sc); |
9671 | /* Link is down. */ |
9672 | DPRINTF(WM_DEBUG_LINK, |
9673 | ("%s: LINK: set media -> link down\n" , |
9674 | device_xname(sc->sc_dev))); |
9675 | sc->sc_tbi_linkup = 0; |
9676 | } |
9677 | } else { |
9678 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n" , |
9679 | device_xname(sc->sc_dev))); |
9680 | sc->sc_tbi_linkup = 0; |
9681 | } |
9682 | |
9683 | wm_tbi_serdes_set_linkled(sc); |
9684 | |
9685 | return 0; |
9686 | } |
9687 | |
9688 | /* |
9689 | * wm_tbi_mediastatus: [ifmedia interface function] |
9690 | * |
9691 | * Get the current interface media status on a 1000BASE-X device. |
9692 | */ |
9693 | static void |
9694 | wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
9695 | { |
9696 | struct wm_softc *sc = ifp->if_softc; |
9697 | uint32_t ctrl, status; |
9698 | |
9699 | ifmr->ifm_status = IFM_AVALID; |
9700 | ifmr->ifm_active = IFM_ETHER; |
9701 | |
9702 | status = CSR_READ(sc, WMREG_STATUS); |
9703 | if ((status & STATUS_LU) == 0) { |
9704 | ifmr->ifm_active |= IFM_NONE; |
9705 | return; |
9706 | } |
9707 | |
9708 | ifmr->ifm_status |= IFM_ACTIVE; |
9709 | /* Only 82545 is LX */ |
9710 | if (sc->sc_type == WM_T_82545) |
9711 | ifmr->ifm_active |= IFM_1000_LX; |
9712 | else |
9713 | ifmr->ifm_active |= IFM_1000_SX; |
9714 | if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) |
9715 | ifmr->ifm_active |= IFM_FDX; |
9716 | else |
9717 | ifmr->ifm_active |= IFM_HDX; |
9718 | ctrl = CSR_READ(sc, WMREG_CTRL); |
9719 | if (ctrl & CTRL_RFCE) |
9720 | ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; |
9721 | if (ctrl & CTRL_TFCE) |
9722 | ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; |
9723 | } |
9724 | |
9725 | /* XXX TBI only */ |
9726 | static int |
9727 | wm_check_for_link(struct wm_softc *sc) |
9728 | { |
9729 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; |
9730 | uint32_t rxcw; |
9731 | uint32_t ctrl; |
9732 | uint32_t status; |
9733 | uint32_t sig; |
9734 | |
9735 | if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) { |
9736 | /* XXX need some work for >= 82571 */ |
9737 | if (sc->sc_type >= WM_T_82571) { |
9738 | sc->sc_tbi_linkup = 1; |
9739 | return 0; |
9740 | } |
9741 | } |
9742 | |
9743 | rxcw = CSR_READ(sc, WMREG_RXCW); |
9744 | ctrl = CSR_READ(sc, WMREG_CTRL); |
9745 | status = CSR_READ(sc, WMREG_STATUS); |
9746 | |
9747 | sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0; |
9748 | |
9749 | DPRINTF(WM_DEBUG_LINK, |
9750 | ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n" , |
9751 | device_xname(sc->sc_dev), __func__, |
9752 | ((ctrl & CTRL_SWDPIN(1)) == sig), |
9753 | ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0))); |
9754 | |
9755 | /* |
9756 | * SWDPIN LU RXCW |
9757 | * 0 0 0 |
9758 | * 0 0 1 (should not happen) |
9759 | * 0 1 0 (should not happen) |
9760 | * 0 1 1 (should not happen) |
9761 | * 1 0 0 Disable autonego and force linkup |
9762 | * 1 0 1 got /C/ but not linkup yet |
9763 | * 1 1 0 (linkup) |
9764 | * 1 1 1 If IFM_AUTO, back to autonego |
9765 | * |
9766 | */ |
9767 | if (((ctrl & CTRL_SWDPIN(1)) == sig) |
9768 | && ((status & STATUS_LU) == 0) |
9769 | && ((rxcw & RXCW_C) == 0)) { |
9770 | DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n" , |
9771 | __func__)); |
9772 | sc->sc_tbi_linkup = 0; |
9773 | /* Disable auto-negotiation in the TXCW register */ |
9774 | CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE)); |
9775 | |
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
		 */
9782 | sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD; |
9783 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9784 | } else if (((status & STATUS_LU) != 0) |
9785 | && ((rxcw & RXCW_C) != 0) |
9786 | && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) { |
9787 | sc->sc_tbi_linkup = 1; |
9788 | DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n" , |
9789 | __func__)); |
9790 | CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); |
9791 | CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU)); |
9792 | } else if (((ctrl & CTRL_SWDPIN(1)) == sig) |
9793 | && ((rxcw & RXCW_C) != 0)) { |
9794 | DPRINTF(WM_DEBUG_LINK, ("/C/" )); |
9795 | } else { |
9796 | DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n" , __func__, rxcw, ctrl, |
9797 | status)); |
9798 | } |
9799 | |
9800 | return 0; |
9801 | } |
9802 | |
9803 | /* |
9804 | * wm_tbi_tick: |
9805 | * |
9806 | * Check the link on TBI devices. |
9807 | * This function acts as mii_tick(). |
9808 | */ |
9809 | static void |
9810 | wm_tbi_tick(struct wm_softc *sc) |
9811 | { |
9812 | struct mii_data *mii = &sc->sc_mii; |
9813 | struct ifmedia_entry *ife = mii->mii_media.ifm_cur; |
9814 | uint32_t status; |
9815 | |
9816 | KASSERT(WM_CORE_LOCKED(sc)); |
9817 | |
9818 | status = CSR_READ(sc, WMREG_STATUS); |
9819 | |
9820 | /* XXX is this needed? */ |
9821 | (void)CSR_READ(sc, WMREG_RXCW); |
9822 | (void)CSR_READ(sc, WMREG_CTRL); |
9823 | |
9824 | /* set link status */ |
9825 | if ((status & STATUS_LU) == 0) { |
9826 | DPRINTF(WM_DEBUG_LINK, |
9827 | ("%s: LINK: checklink -> down\n" , |
9828 | device_xname(sc->sc_dev))); |
9829 | sc->sc_tbi_linkup = 0; |
9830 | } else if (sc->sc_tbi_linkup == 0) { |
9831 | DPRINTF(WM_DEBUG_LINK, |
9832 | ("%s: LINK: checklink -> up %s\n" , |
9833 | device_xname(sc->sc_dev), |
9834 | (status & STATUS_FD) ? "FDX" : "HDX" )); |
9835 | sc->sc_tbi_linkup = 1; |
9836 | sc->sc_tbi_serdes_ticks = 0; |
9837 | } |
9838 | |
9839 | if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0) |
9840 | goto setled; |
9841 | |
9842 | if ((status & STATUS_LU) == 0) { |
9843 | sc->sc_tbi_linkup = 0; |
9844 | /* If the timer expired, retry autonegotiation */ |
9845 | if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) |
9846 | && (++sc->sc_tbi_serdes_ticks |
9847 | >= sc->sc_tbi_serdes_anegticks)) { |
9848 | DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n" )); |
9849 | sc->sc_tbi_serdes_ticks = 0; |
9850 | /* |
9851 | * Reset the link, and let autonegotiation do |
9852 | * its thing |
9853 | */ |
9854 | sc->sc_ctrl |= CTRL_LRST; |
9855 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9856 | CSR_WRITE_FLUSH(sc); |
9857 | delay(1000); |
9858 | sc->sc_ctrl &= ~CTRL_LRST; |
9859 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9860 | CSR_WRITE_FLUSH(sc); |
9861 | delay(1000); |
9862 | CSR_WRITE(sc, WMREG_TXCW, |
9863 | sc->sc_txcw & ~TXCW_ANE); |
9864 | CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); |
9865 | } |
9866 | } |
9867 | |
9868 | setled: |
9869 | wm_tbi_serdes_set_linkled(sc); |
9870 | } |
9871 | |
9872 | /* SERDES related */ |
9873 | static void |
9874 | wm_serdes_power_up_link_82575(struct wm_softc *sc) |
9875 | { |
9876 | uint32_t reg; |
9877 | |
9878 | if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES) |
9879 | && ((sc->sc_flags & WM_F_SGMII) == 0)) |
9880 | return; |
9881 | |
9882 | reg = CSR_READ(sc, WMREG_PCS_CFG); |
9883 | reg |= PCS_CFG_PCS_EN; |
9884 | CSR_WRITE(sc, WMREG_PCS_CFG, reg); |
9885 | |
9886 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
9887 | reg &= ~CTRL_EXT_SWDPIN(3); |
9888 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
9889 | CSR_WRITE_FLUSH(sc); |
9890 | } |
9891 | |
9892 | static int |
9893 | wm_serdes_mediachange(struct ifnet *ifp) |
9894 | { |
9895 | struct wm_softc *sc = ifp->if_softc; |
9896 | bool pcs_autoneg = true; /* XXX */ |
9897 | uint32_t ctrl_ext, pcs_lctl, reg; |
9898 | |
9899 | /* XXX Currently, this function is not called on 8257[12] */ |
9900 | if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572) |
9901 | || (sc->sc_type >= WM_T_82575)) |
9902 | CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK); |
9903 | |
9904 | wm_serdes_power_up_link_82575(sc); |
9905 | |
9906 | sc->sc_ctrl |= CTRL_SLU; |
9907 | |
9908 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) |
9909 | sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1); |
9910 | |
9911 | ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); |
9912 | pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL); |
9913 | switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) { |
9914 | case CTRL_EXT_LINK_MODE_SGMII: |
9915 | pcs_autoneg = true; |
9916 | pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT; |
9917 | break; |
9918 | case CTRL_EXT_LINK_MODE_1000KX: |
9919 | pcs_autoneg = false; |
9920 | /* FALLTHROUGH */ |
9921 | default: |
9922 | if ((sc->sc_type == WM_T_82575) |
9923 | || (sc->sc_type == WM_T_82576)) { |
9924 | if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0) |
9925 | pcs_autoneg = false; |
9926 | } |
9927 | sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD |
9928 | | CTRL_FRCFDX; |
9929 | pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL; |
9930 | } |
9931 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
9932 | |
9933 | if (pcs_autoneg) { |
9934 | pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART; |
9935 | pcs_lctl &= ~PCS_LCTL_FORCE_FC; |
9936 | |
9937 | reg = CSR_READ(sc, WMREG_PCS_ANADV); |
9938 | reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE); |
9939 | reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE; |
9940 | CSR_WRITE(sc, WMREG_PCS_ANADV, reg); |
9941 | } else |
9942 | pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC; |
9943 | |
9944 | CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl); |
9945 | |
9947 | return 0; |
9948 | } |
9949 | |
9950 | static void |
9951 | wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
9952 | { |
9953 | struct wm_softc *sc = ifp->if_softc; |
9954 | struct mii_data *mii = &sc->sc_mii; |
9955 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; |
9956 | uint32_t pcs_adv, pcs_lpab, reg; |
9957 | |
9958 | ifmr->ifm_status = IFM_AVALID; |
9959 | ifmr->ifm_active = IFM_ETHER; |
9960 | |
9961 | /* Check PCS */ |
9962 | reg = CSR_READ(sc, WMREG_PCS_LSTS); |
9963 | if ((reg & PCS_LSTS_LINKOK) == 0) { |
9964 | ifmr->ifm_active |= IFM_NONE; |
9965 | sc->sc_tbi_linkup = 0; |
9966 | goto setled; |
9967 | } |
9968 | |
9969 | sc->sc_tbi_linkup = 1; |
9970 | ifmr->ifm_status |= IFM_ACTIVE; |
9971 | ifmr->ifm_active |= IFM_1000_SX; /* XXX */ |
9972 | if ((reg & PCS_LSTS_FDX) != 0) |
9973 | ifmr->ifm_active |= IFM_FDX; |
9974 | else |
9975 | ifmr->ifm_active |= IFM_HDX; |
9976 | mii->mii_media_active &= ~IFM_ETH_FMASK; |
9977 | if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { |
9978 | /* Check flow */ |
9979 | reg = CSR_READ(sc, WMREG_PCS_LSTS); |
9980 | if ((reg & PCS_LSTS_AN_COMP) == 0) { |
9981 | DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n" )); |
9982 | goto setled; |
9983 | } |
9984 | pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV); |
9985 | pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB); |
9986 | DPRINTF(WM_DEBUG_LINK, |
9987 | ("XXX AN result(2) %08x, %08x\n" , pcs_adv, pcs_lpab)); |
9988 | if ((pcs_adv & TXCW_SYM_PAUSE) |
9989 | && (pcs_lpab & TXCW_SYM_PAUSE)) { |
9990 | mii->mii_media_active |= IFM_FLOW |
9991 | | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
9992 | } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0) |
9993 | && (pcs_adv & TXCW_ASYM_PAUSE) |
9994 | && (pcs_lpab & TXCW_SYM_PAUSE) |
9995 | && (pcs_lpab & TXCW_ASYM_PAUSE)) { |
9996 | mii->mii_media_active |= IFM_FLOW |
9997 | | IFM_ETH_TXPAUSE; |
9998 | } else if ((pcs_adv & TXCW_SYM_PAUSE) |
9999 | && (pcs_adv & TXCW_ASYM_PAUSE) |
10000 | && ((pcs_lpab & TXCW_SYM_PAUSE) == 0) |
10001 | && (pcs_lpab & TXCW_ASYM_PAUSE)) { |
10002 | mii->mii_media_active |= IFM_FLOW |
10003 | | IFM_ETH_RXPAUSE; |
		}
10006 | } |
10007 | ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
10008 | | (mii->mii_media_active & IFM_ETH_FMASK); |
10009 | setled: |
10010 | wm_tbi_serdes_set_linkled(sc); |
10011 | } |
10012 | |
10013 | /* |
10014 | * wm_serdes_tick: |
10015 | * |
10016 | * Check the link on serdes devices. |
10017 | */ |
10018 | static void |
10019 | wm_serdes_tick(struct wm_softc *sc) |
10020 | { |
10021 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
10022 | struct mii_data *mii = &sc->sc_mii; |
10023 | struct ifmedia_entry *ife = mii->mii_media.ifm_cur; |
10024 | uint32_t reg; |
10025 | |
10026 | KASSERT(WM_CORE_LOCKED(sc)); |
10027 | |
10028 | mii->mii_media_status = IFM_AVALID; |
10029 | mii->mii_media_active = IFM_ETHER; |
10030 | |
10031 | /* Check PCS */ |
10032 | reg = CSR_READ(sc, WMREG_PCS_LSTS); |
10033 | if ((reg & PCS_LSTS_LINKOK) != 0) { |
10034 | mii->mii_media_status |= IFM_ACTIVE; |
10035 | sc->sc_tbi_linkup = 1; |
10036 | sc->sc_tbi_serdes_ticks = 0; |
10037 | mii->mii_media_active |= IFM_1000_SX; /* XXX */ |
10038 | if ((reg & PCS_LSTS_FDX) != 0) |
10039 | mii->mii_media_active |= IFM_FDX; |
10040 | else |
10041 | mii->mii_media_active |= IFM_HDX; |
10042 | } else { |
10043 | mii->mii_media_status |= IFM_NONE; |
10044 | sc->sc_tbi_linkup = 0; |
10045 | /* If the timer expired, retry autonegotiation */ |
10046 | if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) |
10047 | && (++sc->sc_tbi_serdes_ticks |
10048 | >= sc->sc_tbi_serdes_anegticks)) { |
10049 | DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n" )); |
10050 | sc->sc_tbi_serdes_ticks = 0; |
10051 | /* XXX */ |
10052 | wm_serdes_mediachange(ifp); |
10053 | } |
10054 | } |
10055 | |
10056 | wm_tbi_serdes_set_linkled(sc); |
10057 | } |
10058 | |
10059 | /* SFP related */ |
10060 | |
10061 | static int |
10062 | wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data) |
10063 | { |
10064 | uint32_t i2ccmd; |
10065 | int i; |
10066 | |
10067 | i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ; |
10068 | CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); |
10069 | |
10070 | /* Poll the ready bit */ |
10071 | for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { |
10072 | delay(50); |
10073 | i2ccmd = CSR_READ(sc, WMREG_I2CCMD); |
10074 | if (i2ccmd & I2CCMD_READY) |
10075 | break; |
10076 | } |
10077 | if ((i2ccmd & I2CCMD_READY) == 0) |
10078 | return -1; |
10079 | if ((i2ccmd & I2CCMD_ERROR) != 0) |
10080 | return -1; |
10081 | |
10082 | *data = i2ccmd & 0x00ff; |
10083 | |
10084 | return 0; |
10085 | } |
10086 | |
10087 | static uint32_t |
10088 | wm_sfp_get_media_type(struct wm_softc *sc) |
10089 | { |
10090 | uint32_t ctrl_ext; |
10091 | uint8_t val = 0; |
10092 | int timeout = 3; |
10093 | uint32_t mediatype = WM_MEDIATYPE_UNKNOWN; |
10094 | int rv = -1; |
10095 | |
10096 | ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); |
10097 | ctrl_ext &= ~CTRL_EXT_SWDPIN(3); |
10098 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA); |
10099 | CSR_WRITE_FLUSH(sc); |
10100 | |
10101 | /* Read SFP module data */ |
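	/* Byte 0 of the module EEPROM is the identifier (see SFF-8472) */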
10102 | while (timeout) { |
10103 | rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val); |
10104 | if (rv == 0) |
10105 | break; |
10106 | delay(100*1000); /* XXX too big */ |
10107 | timeout--; |
10108 | } |
10109 | if (rv != 0) |
10110 | goto out; |
10111 | switch (val) { |
10112 | case SFF_SFP_ID_SFF: |
10113 | aprint_normal_dev(sc->sc_dev, |
10114 | "Module/Connector soldered to board\n" ); |
10115 | break; |
10116 | case SFF_SFP_ID_SFP: |
10117 | aprint_normal_dev(sc->sc_dev, "SFP\n" ); |
10118 | break; |
10119 | case SFF_SFP_ID_UNKNOWN: |
10120 | goto out; |
10121 | default: |
10122 | break; |
10123 | } |
10124 | |
10125 | rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val); |
10126 | if (rv != 0) { |
10127 | goto out; |
10128 | } |
10129 | |
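	/* Byte 6 of the module EEPROM holds the Ethernet compliance codes */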
10130 | if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0) |
10131 | mediatype = WM_MEDIATYPE_SERDES; |
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
10133 | sc->sc_flags |= WM_F_SGMII; |
10134 | mediatype = WM_MEDIATYPE_COPPER; |
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
10136 | sc->sc_flags |= WM_F_SGMII; |
10137 | mediatype = WM_MEDIATYPE_SERDES; |
10138 | } |
10139 | |
10140 | out: |
10141 | /* Restore I2C interface setting */ |
10142 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); |
10143 | |
10144 | return mediatype; |
10145 | } |
10146 | |
10147 | /* |
10148 | * NVM related. |
10149 | * Microwire, SPI (w/wo EERD) and Flash. |
10150 | */ |
10151 | |
/* Both SPI and Microwire */
10153 | |
10154 | /* |
10155 | * wm_eeprom_sendbits: |
10156 | * |
10157 | * Send a series of bits to the EEPROM. |
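 * Bits go out MSB first: DI is set, then SK is pulsed high and low.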
10158 | */ |
10159 | static void |
10160 | wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) |
10161 | { |
10162 | uint32_t reg; |
10163 | int x; |
10164 | |
10165 | reg = CSR_READ(sc, WMREG_EECD); |
10166 | |
10167 | for (x = nbits; x > 0; x--) { |
10168 | if (bits & (1U << (x - 1))) |
10169 | reg |= EECD_DI; |
10170 | else |
10171 | reg &= ~EECD_DI; |
10172 | CSR_WRITE(sc, WMREG_EECD, reg); |
10173 | CSR_WRITE_FLUSH(sc); |
10174 | delay(2); |
10175 | CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); |
10176 | CSR_WRITE_FLUSH(sc); |
10177 | delay(2); |
10178 | CSR_WRITE(sc, WMREG_EECD, reg); |
10179 | CSR_WRITE_FLUSH(sc); |
10180 | delay(2); |
10181 | } |
10182 | } |
10183 | |
10184 | /* |
10185 | * wm_eeprom_recvbits: |
10186 | * |
10187 | * Receive a series of bits from the EEPROM. |
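 * Bits come in MSB first: SK is raised, DO is sampled, then SK is dropped.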
10188 | */ |
10189 | static void |
10190 | wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) |
10191 | { |
10192 | uint32_t reg, val; |
10193 | int x; |
10194 | |
10195 | reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; |
10196 | |
10197 | val = 0; |
10198 | for (x = nbits; x > 0; x--) { |
10199 | CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); |
10200 | CSR_WRITE_FLUSH(sc); |
10201 | delay(2); |
10202 | if (CSR_READ(sc, WMREG_EECD) & EECD_DO) |
10203 | val |= (1U << (x - 1)); |
10204 | CSR_WRITE(sc, WMREG_EECD, reg); |
10205 | CSR_WRITE_FLUSH(sc); |
10206 | delay(2); |
10207 | } |
10208 | *valp = val; |
10209 | } |
10210 | |
10211 | /* Microwire */ |
10212 | |
10213 | /* |
10214 | * wm_nvm_read_uwire: |
10215 | * |
10216 | * Read a word from the EEPROM using the MicroWire protocol. |
10217 | */ |
10218 | static int |
10219 | wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) |
10220 | { |
10221 | uint32_t reg, val; |
10222 | int i; |
10223 | |
10224 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10225 | device_xname(sc->sc_dev), __func__)); |
10226 | |
10227 | for (i = 0; i < wordcnt; i++) { |
10228 | /* Clear SK and DI. */ |
10229 | reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); |
10230 | CSR_WRITE(sc, WMREG_EECD, reg); |
10231 | |
10232 | /* |
10233 | * XXX: workaround for a bug in qemu-0.12.x and prior |
10234 | * and Xen. |
10235 | * |
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
10238 | */ |
10239 | if (sc->sc_type == WM_T_82540) { |
10240 | reg |= EECD_SK; |
10241 | CSR_WRITE(sc, WMREG_EECD, reg); |
10242 | reg &= ~EECD_SK; |
10243 | CSR_WRITE(sc, WMREG_EECD, reg); |
10244 | CSR_WRITE_FLUSH(sc); |
10245 | delay(2); |
10246 | } |
10247 | /* XXX: end of workaround */ |
10248 | |
10249 | /* Set CHIP SELECT. */ |
10250 | reg |= EECD_CS; |
10251 | CSR_WRITE(sc, WMREG_EECD, reg); |
10252 | CSR_WRITE_FLUSH(sc); |
10253 | delay(2); |
10254 | |
10255 | /* Shift in the READ command. */ |
10256 | wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); |
10257 | |
10258 | /* Shift in address. */ |
10259 | wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits); |
10260 | |
10261 | /* Shift out the data. */ |
10262 | wm_eeprom_recvbits(sc, &val, 16); |
10263 | data[i] = val & 0xffff; |
10264 | |
10265 | /* Clear CHIP SELECT. */ |
10266 | reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; |
10267 | CSR_WRITE(sc, WMREG_EECD, reg); |
10268 | CSR_WRITE_FLUSH(sc); |
10269 | delay(2); |
10270 | } |
10271 | |
10272 | return 0; |
10273 | } |
10274 | |
10275 | /* SPI */ |
10276 | |
10277 | /* |
10278 | * Set SPI and FLASH related information from the EECD register. |
10279 | * For 82541 and 82547, the word size is taken from EEPROM. |
10280 | */ |
10281 | static int |
10282 | wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc) |
10283 | { |
10284 | int size; |
10285 | uint32_t reg; |
10286 | uint16_t data; |
10287 | |
10288 | reg = CSR_READ(sc, WMREG_EECD); |
10289 | sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8; |
10290 | |
10291 | /* Read the size of NVM from EECD by default */ |
10292 | size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK); |
10293 | switch (sc->sc_type) { |
10294 | case WM_T_82541: |
10295 | case WM_T_82541_2: |
10296 | case WM_T_82547: |
10297 | case WM_T_82547_2: |
		/* Set a dummy word size so the EEPROM can be read at all */
10299 | sc->sc_nvm_wordsize = 64; |
10300 | wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data); |
10301 | reg = data; |
10302 | size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK); |
		if (size == 0)
			size = 6;	/* 64 words */
10305 | else |
10306 | size += NVM_WORD_SIZE_BASE_SHIFT + 1; |
10307 | break; |
10308 | case WM_T_80003: |
10309 | case WM_T_82571: |
10310 | case WM_T_82572: |
10311 | case WM_T_82573: /* SPI case */ |
10312 | case WM_T_82574: /* SPI case */ |
10313 | case WM_T_82583: /* SPI case */ |
10314 | size += NVM_WORD_SIZE_BASE_SHIFT; |
10315 | if (size > 14) |
10316 | size = 14; |
10317 | break; |
10318 | case WM_T_82575: |
10319 | case WM_T_82576: |
10320 | case WM_T_82580: |
10321 | case WM_T_I350: |
10322 | case WM_T_I354: |
10323 | case WM_T_I210: |
10324 | case WM_T_I211: |
10325 | size += NVM_WORD_SIZE_BASE_SHIFT; |
10326 | if (size > 15) |
10327 | size = 15; |
10328 | break; |
10329 | default: |
10330 | aprint_error_dev(sc->sc_dev, |
10331 | "%s: unknown device(%d)?\n" , __func__, sc->sc_type); |
10332 | return -1; |
10333 | break; |
10334 | } |
10335 | |
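	/* size is an exponent: the word count is 1 << size (e.g. 6 -> 64) */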
10336 | sc->sc_nvm_wordsize = 1 << size; |
10337 | |
10338 | return 0; |
10339 | } |
10340 | |
10341 | /* |
10342 | * wm_nvm_ready_spi: |
10343 | * |
10344 | * Wait for a SPI EEPROM to be ready for commands. |
10345 | */ |
10346 | static int |
10347 | wm_nvm_ready_spi(struct wm_softc *sc) |
10348 | { |
10349 | uint32_t val; |
10350 | int usec; |
10351 | |
10352 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10353 | device_xname(sc->sc_dev), __func__)); |
10354 | |
10355 | for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { |
10356 | wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); |
10357 | wm_eeprom_recvbits(sc, &val, 8); |
10358 | if ((val & SPI_SR_RDY) == 0) |
10359 | break; |
10360 | } |
10361 | if (usec >= SPI_MAX_RETRIES) { |
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
10363 | return 1; |
10364 | } |
10365 | return 0; |
10366 | } |
10367 | |
10368 | /* |
10369 | * wm_nvm_read_spi: |
10370 | * |
 * Read a word from the EEPROM using the SPI protocol.
10372 | */ |
10373 | static int |
10374 | wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) |
10375 | { |
10376 | uint32_t reg, val; |
10377 | int i; |
10378 | uint8_t opc; |
10379 | |
10380 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10381 | device_xname(sc->sc_dev), __func__)); |
10382 | |
10383 | /* Clear SK and CS. */ |
10384 | reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); |
10385 | CSR_WRITE(sc, WMREG_EECD, reg); |
10386 | CSR_WRITE_FLUSH(sc); |
10387 | delay(2); |
10388 | |
10389 | if (wm_nvm_ready_spi(sc)) |
10390 | return 1; |
10391 | |
10392 | /* Toggle CS to flush commands. */ |
10393 | CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); |
10394 | CSR_WRITE_FLUSH(sc); |
10395 | delay(2); |
10396 | CSR_WRITE(sc, WMREG_EECD, reg); |
10397 | CSR_WRITE_FLUSH(sc); |
10398 | delay(2); |
10399 | |
10400 | opc = SPI_OPC_READ; |
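	/* With 8 address bits, byte addresses >= 0x100 need A8 in the opcode */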
10401 | if (sc->sc_nvm_addrbits == 8 && word >= 128) |
10402 | opc |= SPI_OPC_A8; |
10403 | |
10404 | wm_eeprom_sendbits(sc, opc, 8); |
10405 | wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits); |
10406 | |
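	/* Each word arrives low byte first; swap the two bytes accordingly */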
10407 | for (i = 0; i < wordcnt; i++) { |
10408 | wm_eeprom_recvbits(sc, &val, 16); |
10409 | data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); |
10410 | } |
10411 | |
10412 | /* Raise CS and clear SK. */ |
10413 | reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; |
10414 | CSR_WRITE(sc, WMREG_EECD, reg); |
10415 | CSR_WRITE_FLUSH(sc); |
10416 | delay(2); |
10417 | |
10418 | return 0; |
10419 | } |
10420 | |
/* Reading via the EERD register */
10422 | |
10423 | static int |
10424 | wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) |
10425 | { |
10426 | uint32_t attempts = 100000; |
10427 | uint32_t i, reg = 0; |
10428 | int32_t done = -1; |
10429 | |
10430 | for (i = 0; i < attempts; i++) { |
10431 | reg = CSR_READ(sc, rw); |
10432 | |
10433 | if (reg & EERD_DONE) { |
10434 | done = 0; |
10435 | break; |
10436 | } |
10437 | delay(5); |
10438 | } |
10439 | |
10440 | return done; |
10441 | } |
10442 | |
10443 | static int |
10444 | wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, |
10445 | uint16_t *data) |
10446 | { |
10447 | int i, eerd = 0; |
10448 | int error = 0; |
10449 | |
10450 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10451 | device_xname(sc->sc_dev), __func__)); |
10452 | |
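	/* For each word: write the address with START set, poll DONE, read data */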
10453 | for (i = 0; i < wordcnt; i++) { |
10454 | eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; |
10455 | |
10456 | CSR_WRITE(sc, WMREG_EERD, eerd); |
10457 | error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); |
10458 | if (error != 0) |
10459 | break; |
10460 | |
10461 | data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); |
10462 | } |
10463 | |
10464 | return error; |
10465 | } |
10466 | |
10467 | /* Flash */ |
10468 | |
10469 | static int |
10470 | wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) |
10471 | { |
10472 | uint32_t eecd; |
10473 | uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; |
10474 | uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); |
10475 | uint8_t sig_byte = 0; |
10476 | |
10477 | switch (sc->sc_type) { |
10478 | case WM_T_PCH_SPT: |
10479 | /* |
10480 | * In SPT, read from the CTRL_EXT reg instead of accessing the |
10481 | * sector valid bits from the NVM. |
10482 | */ |
10483 | *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS; |
10484 | if ((*bank == 0) || (*bank == 1)) { |
10485 | aprint_error_dev(sc->sc_dev, |
10486 | "%s: no valid NVM bank present (%u)\n" , __func__, |
10487 | *bank); |
10488 | return -1; |
10489 | } else { |
10490 | *bank = *bank - 2; |
10491 | return 0; |
10492 | } |
10493 | case WM_T_ICH8: |
10494 | case WM_T_ICH9: |
10495 | eecd = CSR_READ(sc, WMREG_EECD); |
10496 | if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) { |
10497 | *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0; |
10498 | return 0; |
10499 | } |
10500 | /* FALLTHROUGH */ |
10501 | default: |
10502 | /* Default to 0 */ |
10503 | *bank = 0; |
10504 | |
10505 | /* Check bank 0 */ |
10506 | wm_read_ich8_byte(sc, act_offset, &sig_byte); |
10507 | if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { |
10508 | *bank = 0; |
10509 | return 0; |
10510 | } |
10511 | |
10512 | /* Check bank 1 */ |
10513 | wm_read_ich8_byte(sc, act_offset + bank1_offset, |
10514 | &sig_byte); |
10515 | if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { |
10516 | *bank = 1; |
10517 | return 0; |
10518 | } |
10519 | } |
10520 | |
10521 | DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n" , |
10522 | device_xname(sc->sc_dev))); |
10523 | return -1; |
10524 | } |
10525 | |
10526 | /****************************************************************************** |
10527 | * This function does initial flash setup so that a new read/write/erase cycle |
10528 | * can be started. |
10529 | * |
10530 | * sc - The pointer to the hw structure |
10531 | ****************************************************************************/ |
10532 | static int32_t |
10533 | wm_ich8_cycle_init(struct wm_softc *sc) |
10534 | { |
10535 | uint16_t hsfsts; |
10536 | int32_t error = 1; |
10537 | int32_t i = 0; |
10538 | |
10539 | hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); |
10540 | |
	/* Check that the Flash Descriptor Valid bit is set in HW status */
10542 | if ((hsfsts & HSFSTS_FLDVAL) == 0) { |
10543 | return error; |
10544 | } |
10545 | |
	/* Clear FCERR and DAEL in HW status by writing 1s */
10548 | hsfsts |= HSFSTS_ERR | HSFSTS_DAEL; |
10549 | |
10550 | ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); |
10551 | |
10552 | /* |
10553 | * Either we should have a hardware SPI cycle in progress bit to check |
10554 | * against, in order to start a new cycle or FDONE bit should be |
10555 | * changed in the hardware so that it is 1 after harware reset, which |
10556 | * can then be used as an indication whether a cycle is in progress or |
10557 | * has been completed .. we should also have some software semaphore |
10558 | * mechanism to guard FDONE or the cycle in progress bit so that two |
10559 | * threads access to those bits can be sequentiallized or a way so that |
10560 | * 2 threads dont start the cycle at the same time |
10561 | */ |
10562 | |
10563 | if ((hsfsts & HSFSTS_FLINPRO) == 0) { |
10564 | /* |
10565 | * There is no cycle running at present, so we can start a |
10566 | * cycle |
10567 | */ |
10568 | |
10569 | /* Begin by setting Flash Cycle Done. */ |
10570 | hsfsts |= HSFSTS_DONE; |
10571 | ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); |
10572 | error = 0; |
10573 | } else { |
10574 | /* |
10575 | * otherwise poll for sometime so the current cycle has a |
10576 | * chance to end before giving up. |
10577 | */ |
10578 | for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) { |
10579 | hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); |
10580 | if ((hsfsts & HSFSTS_FLINPRO) == 0) { |
10581 | error = 0; |
10582 | break; |
10583 | } |
10584 | delay(1); |
10585 | } |
10586 | if (error == 0) { |
10587 | /* |
10588 | * Successful in waiting for previous cycle to timeout, |
10589 | * now set the Flash Cycle Done. |
10590 | */ |
10591 | hsfsts |= HSFSTS_DONE; |
10592 | ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); |
10593 | } |
10594 | } |
10595 | return error; |
10596 | } |
10597 | |
10598 | /****************************************************************************** |
10599 | * This function starts a flash cycle and waits for its completion |
10600 | * |
10601 | * sc - The pointer to the hw structure |
10602 | ****************************************************************************/ |
10603 | static int32_t |
10604 | wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout) |
10605 | { |
10606 | uint16_t hsflctl; |
10607 | uint16_t hsfsts; |
10608 | int32_t error = 1; |
10609 | uint32_t i = 0; |
10610 | |
10611 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ |
10612 | hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); |
10613 | hsflctl |= HSFCTL_GO; |
10614 | ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); |
10615 | |
10616 | /* Wait till FDONE bit is set to 1 */ |
10617 | do { |
10618 | hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); |
10619 | if (hsfsts & HSFSTS_DONE) |
10620 | break; |
10621 | delay(1); |
10622 | i++; |
10623 | } while (i < timeout); |
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
10625 | error = 0; |
10626 | |
10627 | return error; |
10628 | } |
10629 | |
10630 | /****************************************************************************** |
10631 | * Reads a byte or (d)word from the NVM using the ICH8 flash access registers. |
10632 | * |
10633 | * sc - The pointer to the hw structure |
10634 | * index - The index of the byte or word to read. |
10635 | * size - Size of data to read, 1=byte 2=word, 4=dword |
10636 | * data - Pointer to the word to store the value read. |
10637 | *****************************************************************************/ |
10638 | static int32_t |
10639 | wm_read_ich8_data(struct wm_softc *sc, uint32_t index, |
10640 | uint32_t size, uint32_t *data) |
10641 | { |
10642 | uint16_t hsfsts; |
10643 | uint16_t hsflctl; |
10644 | uint32_t flash_linear_address; |
10645 | uint32_t flash_data = 0; |
10646 | int32_t error = 1; |
10647 | int32_t count = 0; |
10648 | |
	if (size < 1 || size > 4 || data == NULL ||
10650 | index > ICH_FLASH_LINEAR_ADDR_MASK) |
10651 | return error; |
10652 | |
10653 | flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + |
10654 | sc->sc_ich8_flash_base; |
10655 | |
10656 | do { |
10657 | delay(1); |
10658 | /* Steps */ |
10659 | error = wm_ich8_cycle_init(sc); |
10660 | if (error) |
10661 | break; |
10662 | |
10663 | hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); |
		/* The byte count field holds size - 1: 0 = 1 byte, 1 = 2, 3 = 4 */
10665 | hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) |
10666 | & HSFCTL_BCOUNT_MASK; |
10667 | hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT; |
10668 | if (sc->sc_type == WM_T_PCH_SPT) { |
10669 | /* |
10670 | * In SPT, This register is in Lan memory space, not |
10671 | * flash. Therefore, only 32 bit access is supported. |
10672 | */ |
10673 | ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL, |
10674 | (uint32_t)hsflctl); |
10675 | } else |
10676 | ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); |
10677 | |
10678 | /* |
10679 | * Write the last 24 bits of index into Flash Linear address |
10680 | * field in Flash Address |
10681 | */ |
10682 | /* TODO: TBD maybe check the index against the size of flash */ |
10683 | |
10684 | ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address); |
10685 | |
10686 | error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT); |
10687 | |
10688 | /* |
10689 | * Check if FCERR is set to 1, if set to 1, clear it and try |
10690 | * the whole sequence a few more times, else read in (shift in) |
10691 | * the Flash Data0, the order is least significant byte first |
10692 | * msb to lsb |
10693 | */ |
10694 | if (error == 0) { |
10695 | flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0); |
10696 | if (size == 1) |
10697 | *data = (uint8_t)(flash_data & 0x000000FF); |
10698 | else if (size == 2) |
10699 | *data = (uint16_t)(flash_data & 0x0000FFFF); |
10700 | else if (size == 4) |
10701 | *data = (uint32_t)flash_data; |
10702 | break; |
10703 | } else { |
10704 | /* |
10705 | * If we've gotten here, then things are probably |
10706 | * completely hosed, but if the error condition is |
10707 | * detected, it won't hurt to give it another try... |
10708 | * ICH_FLASH_CYCLE_REPEAT_COUNT times. |
10709 | */ |
10710 | hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); |
10711 | if (hsfsts & HSFSTS_ERR) { |
10712 | /* Repeat for some time before giving up. */ |
10713 | continue; |
10714 | } else if ((hsfsts & HSFSTS_DONE) == 0) |
10715 | break; |
10716 | } |
10717 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); |
10718 | |
10719 | return error; |
10720 | } |
10721 | |
10722 | /****************************************************************************** |
10723 | * Reads a single byte from the NVM using the ICH8 flash access registers. |
10724 | * |
10725 | * sc - pointer to wm_hw structure |
10726 | * index - The index of the byte to read. |
10727 | * data - Pointer to a byte to store the value read. |
10728 | *****************************************************************************/ |
10729 | static int32_t |
10730 | wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data) |
10731 | { |
10732 | int32_t status; |
10733 | uint32_t word = 0; |
10734 | |
10735 | status = wm_read_ich8_data(sc, index, 1, &word); |
10736 | if (status == 0) |
10737 | *data = (uint8_t)word; |
10738 | else |
10739 | *data = 0; |
10740 | |
10741 | return status; |
10742 | } |
10743 | |
10744 | /****************************************************************************** |
10745 | * Reads a word from the NVM using the ICH8 flash access registers. |
10746 | * |
10747 | * sc - pointer to wm_hw structure |
10748 | * index - The starting byte index of the word to read. |
10749 | * data - Pointer to a word to store the value read. |
10750 | *****************************************************************************/ |
10751 | static int32_t |
10752 | wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data) |
10753 | { |
10754 | int32_t status; |
10755 | uint32_t word = 0; |
10756 | |
10757 | status = wm_read_ich8_data(sc, index, 2, &word); |
10758 | if (status == 0) |
10759 | *data = (uint16_t)word; |
10760 | else |
10761 | *data = 0; |
10762 | |
10763 | return status; |
10764 | } |
10765 | |
10766 | /****************************************************************************** |
10767 | * Reads a dword from the NVM using the ICH8 flash access registers. |
10768 | * |
10769 | * sc - pointer to wm_hw structure |
10770 | * index - The starting byte index of the word to read. |
10771 | * data - Pointer to a word to store the value read. |
10772 | *****************************************************************************/ |
10773 | static int32_t |
10774 | wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data) |
10775 | { |
10776 | int32_t status; |
10777 | |
10778 | status = wm_read_ich8_data(sc, index, 4, data); |
10779 | return status; |
10780 | } |
10781 | |
10782 | /****************************************************************************** |
10783 | * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access |
10784 | * register. |
10785 | * |
10786 | * sc - Struct containing variables accessed by shared code |
10787 | * offset - offset of word in the EEPROM to read |
10788 | * data - word read from the EEPROM |
10789 | * words - number of words to read |
10790 | *****************************************************************************/ |
10791 | static int |
10792 | wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) |
10793 | { |
10794 | int32_t error = 0; |
10795 | uint32_t flash_bank = 0; |
10796 | uint32_t act_offset = 0; |
10797 | uint32_t bank_offset = 0; |
10798 | uint16_t word = 0; |
10799 | uint16_t i = 0; |
10800 | |
10801 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10802 | device_xname(sc->sc_dev), __func__)); |
10803 | |
10804 | /* |
10805 | * We need to know which is the valid flash bank. In the event |
10806 | * that we didn't allocate eeprom_shadow_ram, we may not be |
10807 | * managing flash_bank. So it cannot be trusted and needs |
10808 | * to be updated with each read. |
10809 | */ |
10810 | error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); |
10811 | if (error) { |
10812 | DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n" , |
10813 | device_xname(sc->sc_dev))); |
10814 | flash_bank = 0; |
10815 | } |
10816 | |
10817 | /* |
10818 | * Adjust offset appropriately if we're on bank 1 - adjust for word |
10819 | * size |
10820 | */ |
10821 | bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); |
10822 | |
10823 | error = wm_get_swfwhw_semaphore(sc); |
10824 | if (error) { |
10825 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
10826 | __func__); |
10827 | return error; |
10828 | } |
10829 | |
10830 | for (i = 0; i < words; i++) { |
10831 | /* The NVM part needs a byte offset, hence * 2 */ |
10832 | act_offset = bank_offset + ((offset + i) * 2); |
10833 | error = wm_read_ich8_word(sc, act_offset, &word); |
10834 | if (error) { |
10835 | aprint_error_dev(sc->sc_dev, |
10836 | "%s: failed to read NVM\n" , __func__); |
10837 | break; |
10838 | } |
10839 | data[i] = word; |
10840 | } |
10841 | |
10842 | wm_put_swfwhw_semaphore(sc); |
10843 | return error; |
10844 | } |
10845 | |
10846 | /****************************************************************************** |
10847 | * Reads a 16 bit word or words from the EEPROM using the SPT's flash access |
10848 | * register. |
10849 | * |
10850 | * sc - Struct containing variables accessed by shared code |
10851 | * offset - offset of word in the EEPROM to read |
10852 | * data - word read from the EEPROM |
10853 | * words - number of words to read |
10854 | *****************************************************************************/ |
10855 | static int |
10856 | wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data) |
10857 | { |
10858 | int32_t error = 0; |
10859 | uint32_t flash_bank = 0; |
10860 | uint32_t act_offset = 0; |
10861 | uint32_t bank_offset = 0; |
10862 | uint32_t dword = 0; |
10863 | uint16_t i = 0; |
10864 | |
10865 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10866 | device_xname(sc->sc_dev), __func__)); |
10867 | |
10868 | /* |
10869 | * We need to know which is the valid flash bank. In the event |
10870 | * that we didn't allocate eeprom_shadow_ram, we may not be |
10871 | * managing flash_bank. So it cannot be trusted and needs |
10872 | * to be updated with each read. |
10873 | */ |
10874 | error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); |
10875 | if (error) { |
10876 | DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n" , |
10877 | device_xname(sc->sc_dev))); |
10878 | flash_bank = 0; |
10879 | } |
10880 | |
10881 | /* |
10882 | * Adjust offset appropriately if we're on bank 1 - adjust for word |
10883 | * size |
10884 | */ |
10885 | bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); |
10886 | |
10887 | error = wm_get_swfwhw_semaphore(sc); |
10888 | if (error) { |
10889 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
10890 | __func__); |
10891 | return error; |
10892 | } |
10893 | |
10894 | for (i = 0; i < words; i++) { |
10895 | /* The NVM part needs a byte offset, hence * 2 */ |
10896 | act_offset = bank_offset + ((offset + i) * 2); |
10897 | /* but we must read dword aligned, so mask ... */ |
10898 | error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword); |
10899 | if (error) { |
10900 | aprint_error_dev(sc->sc_dev, |
10901 | "%s: failed to read NVM\n" , __func__); |
10902 | break; |
10903 | } |
10904 | /* ... and pick out low or high word */ |
10905 | if ((act_offset & 0x2) == 0) |
10906 | data[i] = (uint16_t)(dword & 0xFFFF); |
10907 | else |
10908 | data[i] = (uint16_t)((dword >> 16) & 0xFFFF); |
10909 | } |
10910 | |
10911 | wm_put_swfwhw_semaphore(sc); |
10912 | return error; |
10913 | } |
10914 | |
10915 | /* iNVM */ |
10916 | |
10917 | static int |
10918 | wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data) |
10919 | { |
10920 | int32_t rv = 0; |
10921 | uint32_t invm_dword; |
10922 | uint16_t i; |
10923 | uint8_t record_type, word_address; |
10924 | |
10925 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10926 | device_xname(sc->sc_dev), __func__)); |
10927 | |
10928 | for (i = 0; i < INVM_SIZE; i++) { |
10929 | invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i)); |
10930 | /* Get record type */ |
10931 | record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); |
10932 | if (record_type == INVM_UNINITIALIZED_STRUCTURE) |
10933 | break; |
10934 | if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE) |
10935 | i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; |
10936 | if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE) |
10937 | i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; |
10938 | if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) { |
10939 | word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); |
10940 | if (word_address == address) { |
10941 | *data = INVM_DWORD_TO_WORD_DATA(invm_dword); |
10942 | rv = 0; |
10943 | break; |
10944 | } |
10945 | } |
10946 | } |
10947 | |
10948 | return rv; |
10949 | } |
10950 | |
10951 | static int |
10952 | wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data) |
10953 | { |
10954 | int rv = 0; |
10955 | int i; |
10956 | |
10957 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
10958 | device_xname(sc->sc_dev), __func__)); |
10959 | |
10960 | for (i = 0; i < words; i++) { |
10961 | switch (offset + i) { |
10962 | case NVM_OFF_MACADDR: |
10963 | case NVM_OFF_MACADDR1: |
10964 | case NVM_OFF_MACADDR2: |
10965 | rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]); |
10966 | if (rv != 0) { |
10967 | data[i] = 0xffff; |
10968 | rv = -1; |
10969 | } |
10970 | break; |
10971 | case NVM_OFF_CFG2: |
10972 | rv = wm_nvm_read_word_invm(sc, offset, data); |
10973 | if (rv != 0) { |
10974 | *data = NVM_INIT_CTRL_2_DEFAULT_I211; |
10975 | rv = 0; |
10976 | } |
10977 | break; |
10978 | case NVM_OFF_CFG4: |
10979 | rv = wm_nvm_read_word_invm(sc, offset, data); |
10980 | if (rv != 0) { |
10981 | *data = NVM_INIT_CTRL_4_DEFAULT_I211; |
10982 | rv = 0; |
10983 | } |
10984 | break; |
10985 | case NVM_OFF_LED_1_CFG: |
10986 | rv = wm_nvm_read_word_invm(sc, offset, data); |
10987 | if (rv != 0) { |
10988 | *data = NVM_LED_1_CFG_DEFAULT_I211; |
10989 | rv = 0; |
10990 | } |
10991 | break; |
10992 | case NVM_OFF_LED_0_2_CFG: |
10993 | rv = wm_nvm_read_word_invm(sc, offset, data); |
10994 | if (rv != 0) { |
10995 | *data = NVM_LED_0_2_CFG_DEFAULT_I211; |
10996 | rv = 0; |
10997 | } |
10998 | break; |
10999 | case NVM_OFF_ID_LED_SETTINGS: |
11000 | rv = wm_nvm_read_word_invm(sc, offset, data); |
11001 | if (rv != 0) { |
11002 | *data = ID_LED_RESERVED_FFFF; |
11003 | rv = 0; |
11004 | } |
11005 | break; |
11006 | default: |
11007 | DPRINTF(WM_DEBUG_NVM, |
11008 | ("NVM word 0x%02x is not mapped.\n" , offset)); |
11009 | *data = NVM_RESERVED_WORD; |
11010 | break; |
11011 | } |
11012 | } |
11013 | |
11014 | return rv; |
11015 | } |
11016 | |
/* Locking, NVM type detection, checksum validation, version and read */
11018 | |
11019 | /* |
11020 | * wm_nvm_acquire: |
11021 | * |
11022 | * Perform the EEPROM handshake required on some chips. |
11023 | */ |
11024 | static int |
11025 | wm_nvm_acquire(struct wm_softc *sc) |
11026 | { |
11027 | uint32_t reg; |
11028 | int x; |
11029 | int ret = 0; |
11030 | |
11031 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
11032 | device_xname(sc->sc_dev), __func__)); |
11033 | |
11034 | if (sc->sc_type >= WM_T_ICH8) { |
11035 | ret = wm_get_nvm_ich8lan(sc); |
11036 | } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) { |
11037 | ret = wm_get_swfwhw_semaphore(sc); |
11038 | } else if (sc->sc_flags & WM_F_LOCK_SWFW) { |
11039 | /* This will also do wm_get_swsm_semaphore() if needed */ |
11040 | ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); |
11041 | } else if (sc->sc_flags & WM_F_LOCK_SWSM) { |
11042 | ret = wm_get_swsm_semaphore(sc); |
11043 | } |
11044 | |
11045 | if (ret) { |
11046 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n" , |
11047 | __func__); |
11048 | return 1; |
11049 | } |
11050 | |
11051 | if (sc->sc_flags & WM_F_LOCK_EECD) { |
11052 | reg = CSR_READ(sc, WMREG_EECD); |
11053 | |
11054 | /* Request EEPROM access. */ |
11055 | reg |= EECD_EE_REQ; |
11056 | CSR_WRITE(sc, WMREG_EECD, reg); |
11057 | |
11058 | /* ..and wait for it to be granted. */ |
11059 | for (x = 0; x < 1000; x++) { |
11060 | reg = CSR_READ(sc, WMREG_EECD); |
11061 | if (reg & EECD_EE_GNT) |
11062 | break; |
11063 | delay(5); |
11064 | } |
11065 | if ((reg & EECD_EE_GNT) == 0) { |
11066 | aprint_error_dev(sc->sc_dev, |
11067 | "could not acquire EEPROM GNT\n" ); |
11068 | reg &= ~EECD_EE_REQ; |
11069 | CSR_WRITE(sc, WMREG_EECD, reg); |
11070 | if (sc->sc_flags & WM_F_LOCK_EXTCNF) |
11071 | wm_put_swfwhw_semaphore(sc); |
11072 | if (sc->sc_flags & WM_F_LOCK_SWFW) |
11073 | wm_put_swfw_semaphore(sc, SWFW_EEP_SM); |
11074 | else if (sc->sc_flags & WM_F_LOCK_SWSM) |
11075 | wm_put_swsm_semaphore(sc); |
11076 | return 1; |
11077 | } |
11078 | } |
11079 | |
11080 | return 0; |
11081 | } |
11082 | |
11083 | /* |
11084 | * wm_nvm_release: |
11085 | * |
11086 | * Release the EEPROM mutex. |
11087 | */ |
11088 | static void |
11089 | wm_nvm_release(struct wm_softc *sc) |
11090 | { |
11091 | uint32_t reg; |
11092 | |
11093 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
11094 | device_xname(sc->sc_dev), __func__)); |
11095 | |
11096 | if (sc->sc_flags & WM_F_LOCK_EECD) { |
11097 | reg = CSR_READ(sc, WMREG_EECD); |
11098 | reg &= ~EECD_EE_REQ; |
11099 | CSR_WRITE(sc, WMREG_EECD, reg); |
11100 | } |
11101 | |
11102 | if (sc->sc_type >= WM_T_ICH8) { |
11103 | wm_put_nvm_ich8lan(sc); |
11104 | } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) |
11105 | wm_put_swfwhw_semaphore(sc); |
11106 | if (sc->sc_flags & WM_F_LOCK_SWFW) |
11107 | wm_put_swfw_semaphore(sc, SWFW_EEP_SM); |
11108 | else if (sc->sc_flags & WM_F_LOCK_SWSM) |
11109 | wm_put_swsm_semaphore(sc); |
11110 | } |
11111 | |
11112 | static int |
11113 | wm_nvm_is_onboard_eeprom(struct wm_softc *sc) |
11114 | { |
11115 | uint32_t eecd = 0; |
11116 | |
11117 | if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574 |
11118 | || sc->sc_type == WM_T_82583) { |
11119 | eecd = CSR_READ(sc, WMREG_EECD); |
11120 | |
11121 | /* Isolate bits 15 & 16 */ |
11122 | eecd = ((eecd >> 15) & 0x03); |
11123 | |
11124 | /* If both bits are set, device is Flash type */ |
11125 | if (eecd == 0x03) |
11126 | return 0; |
11127 | } |
11128 | return 1; |
11129 | } |
11130 | |
11131 | static int |
11132 | wm_nvm_get_flash_presence_i210(struct wm_softc *sc) |
11133 | { |
11134 | uint32_t eec; |
11135 | |
11136 | eec = CSR_READ(sc, WMREG_EEC); |
11137 | if ((eec & EEC_FLASH_DETECTED) != 0) |
11138 | return 1; |
11139 | |
11140 | return 0; |
11141 | } |
11142 | |
11143 | /* |
11144 | * wm_nvm_validate_checksum |
11145 | * |
11146 | * The checksum is defined as the sum of the first 64 (16 bit) words. |
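 * The image stores a compensating value in the checksum word so that
 * the 16-bit sum comes out to NVM_CHECKSUM (0xBABA).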
11147 | */ |
11148 | static int |
11149 | wm_nvm_validate_checksum(struct wm_softc *sc) |
11150 | { |
11151 | uint16_t checksum; |
11152 | uint16_t eeprom_data; |
11153 | #ifdef WM_DEBUG |
11154 | uint16_t csum_wordaddr, valid_checksum; |
11155 | #endif |
11156 | int i; |
11157 | |
11158 | checksum = 0; |
11159 | |
11160 | /* Don't check for I211 */ |
11161 | if (sc->sc_type == WM_T_I211) |
11162 | return 0; |
11163 | |
11164 | #ifdef WM_DEBUG |
11165 | if (sc->sc_type == WM_T_PCH_LPT) { |
11166 | csum_wordaddr = NVM_OFF_COMPAT; |
11167 | valid_checksum = NVM_COMPAT_VALID_CHECKSUM; |
11168 | } else { |
11169 | csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1; |
11170 | valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM; |
11171 | } |
11172 | |
11173 | /* Dump EEPROM image for debug */ |
11174 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
11175 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
11176 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { |
11177 | /* XXX PCH_SPT? */ |
11178 | wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data); |
11179 | if ((eeprom_data & valid_checksum) == 0) { |
11180 | DPRINTF(WM_DEBUG_NVM, |
11181 | ("%s: NVM need to be updated (%04x != %04x)\n" , |
11182 | device_xname(sc->sc_dev), eeprom_data, |
11183 | valid_checksum)); |
11184 | } |
11185 | } |
11186 | |
11187 | if ((wm_debug & WM_DEBUG_NVM) != 0) { |
11188 | printf("%s: NVM dump:\n" , device_xname(sc->sc_dev)); |
11189 | for (i = 0; i < NVM_SIZE; i++) { |
11190 | if (wm_nvm_read(sc, i, 1, &eeprom_data)) |
11191 | printf("XXXX " ); |
11192 | else |
11193 | printf("%04hx " , eeprom_data); |
11194 | if (i % 8 == 7) |
11195 | printf("\n" ); |
11196 | } |
11197 | } |
11198 | |
11199 | #endif /* WM_DEBUG */ |
11200 | |
11201 | for (i = 0; i < NVM_SIZE; i++) { |
11202 | if (wm_nvm_read(sc, i, 1, &eeprom_data)) |
11203 | return 1; |
11204 | checksum += eeprom_data; |
11205 | } |
11206 | |
11207 | if (checksum != (uint16_t) NVM_CHECKSUM) { |
11208 | #ifdef WM_DEBUG |
11209 | printf("%s: NVM checksum mismatch (%04x != %04x)\n" , |
11210 | device_xname(sc->sc_dev), checksum, NVM_CHECKSUM); |
11211 | #endif |
11212 | } |
11213 | |
11214 | return 0; |
11215 | } |
11216 | |
11217 | static void |
11218 | wm_nvm_version_invm(struct wm_softc *sc) |
11219 | { |
11220 | uint32_t dword; |
11221 | |
11222 | /* |
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and instead just decode word 61 as the
	 * document describes.  Perhaps it's not perfect, though...
11226 | * |
11227 | * Example: |
11228 | * |
11229 | * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6) |
11230 | */ |
11231 | dword = CSR_READ(sc, WM_INVM_DATA_REG(61)); |
11232 | dword = __SHIFTOUT(dword, INVM_VER_1); |
11233 | sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR); |
11234 | sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR); |
11235 | } |
11236 | |
11237 | static void |
11238 | wm_nvm_version(struct wm_softc *sc) |
11239 | { |
11240 | uint16_t major, minor, build, patch; |
11241 | uint16_t uid0, uid1; |
11242 | uint16_t nvm_data; |
11243 | uint16_t off; |
11244 | bool check_version = false; |
11245 | bool check_optionrom = false; |
11246 | bool have_build = false; |
11247 | |
11248 | /* |
11249 | * Version format: |
11250 | * |
11251 | * XYYZ |
11252 | * X0YZ |
11253 | * X0YY |
11254 | * |
11255 | * Example: |
11256 | * |
11257 | * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10) |
11258 | * 82571 0x50a6 5.10.6? |
11259 | * 82572 0x506a 5.6.10? |
11260 | * 82572EI 0x5069 5.6.9? |
11261 | * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4) |
11262 | * 0x2013 2.1.3? |
11263 | * 82583 0x10a0 1.10.0? (document says it's default vaule) |
11264 | */ |
11265 | wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1); |
11266 | switch (sc->sc_type) { |
11267 | case WM_T_82571: |
11268 | case WM_T_82572: |
11269 | case WM_T_82574: |
11270 | case WM_T_82583: |
11271 | check_version = true; |
11272 | check_optionrom = true; |
11273 | have_build = true; |
11274 | break; |
11275 | case WM_T_82575: |
11276 | case WM_T_82576: |
11277 | case WM_T_82580: |
11278 | if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID) |
11279 | check_version = true; |
11280 | break; |
11281 | case WM_T_I211: |
11282 | wm_nvm_version_invm(sc); |
11283 | goto printver; |
11284 | case WM_T_I210: |
11285 | if (!wm_nvm_get_flash_presence_i210(sc)) { |
11286 | wm_nvm_version_invm(sc); |
11287 | goto printver; |
11288 | } |
11289 | /* FALLTHROUGH */ |
11290 | case WM_T_I350: |
11291 | case WM_T_I354: |
11292 | check_version = true; |
11293 | check_optionrom = true; |
11294 | break; |
11295 | default: |
11296 | return; |
11297 | } |
11298 | if (check_version) { |
11299 | wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data); |
11300 | major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; |
11301 | if (have_build || ((nvm_data & 0x0f00) != 0x0000)) { |
11302 | minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; |
11303 | build = nvm_data & NVM_BUILD_MASK; |
11304 | have_build = true; |
11305 | } else |
11306 | minor = nvm_data & 0x00ff; |
11307 | |
		/* Convert the BCD-style minor to decimal (e.g. 0x10 -> 10) */
11309 | minor = (minor / 16) * 10 + (minor % 16); |
11310 | sc->sc_nvm_ver_major = major; |
11311 | sc->sc_nvm_ver_minor = minor; |
11312 | |
11313 | printver: |
11314 | aprint_verbose(", version %d.%d" , sc->sc_nvm_ver_major, |
11315 | sc->sc_nvm_ver_minor); |
11316 | if (have_build) { |
11317 | sc->sc_nvm_ver_build = build; |
11318 | aprint_verbose(".%d" , build); |
11319 | } |
11320 | } |
11321 | if (check_optionrom) { |
11322 | wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off); |
11323 | /* Option ROM Version */ |
11324 | if ((off != 0x0000) && (off != 0xffff)) { |
11325 | off += NVM_COMBO_VER_OFF; |
11326 | wm_nvm_read(sc, off + 1, 1, &uid1); |
11327 | wm_nvm_read(sc, off, 1, &uid0); |
11328 | if ((uid0 != 0) && (uid0 != 0xffff) |
11329 | && (uid1 != 0) && (uid1 != 0xffff)) { |
11330 | /* 16bits */ |
11331 | major = uid0 >> 8; |
11332 | build = (uid0 << 8) | (uid1 >> 8); |
11333 | patch = uid1 & 0x00ff; |
11334 | aprint_verbose(", option ROM Version %d.%d.%d" , |
11335 | major, build, patch); |
11336 | } |
11337 | } |
11338 | } |
11339 | |
11340 | wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0); |
11341 | aprint_verbose(", Image Unique ID %08x" , (uid1 << 16) | uid0); |
11342 | } |
11343 | |
11344 | /* |
11345 | * wm_nvm_read: |
11346 | * |
11347 | * Read data from the serial EEPROM. |
11348 | */ |
11349 | static int |
11350 | wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) |
11351 | { |
11352 | int rv; |
11353 | |
11354 | DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n" , |
11355 | device_xname(sc->sc_dev), __func__)); |
11356 | |
11357 | if (sc->sc_flags & WM_F_EEPROM_INVALID) |
11358 | return 1; |
11359 | |
11360 | if (wm_nvm_acquire(sc)) |
11361 | return 1; |
11362 | |
11363 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
11364 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
11365 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) |
11366 | rv = wm_nvm_read_ich8(sc, word, wordcnt, data); |
11367 | else if (sc->sc_type == WM_T_PCH_SPT) |
11368 | rv = wm_nvm_read_spt(sc, word, wordcnt, data); |
11369 | else if (sc->sc_flags & WM_F_EEPROM_INVM) |
11370 | rv = wm_nvm_read_invm(sc, word, wordcnt, data); |
11371 | else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) |
11372 | rv = wm_nvm_read_eerd(sc, word, wordcnt, data); |
11373 | else if (sc->sc_flags & WM_F_EEPROM_SPI) |
11374 | rv = wm_nvm_read_spi(sc, word, wordcnt, data); |
11375 | else |
11376 | rv = wm_nvm_read_uwire(sc, word, wordcnt, data); |
11377 | |
11378 | wm_nvm_release(sc); |
11379 | return rv; |
11380 | } |
11381 | |
11382 | /* |
11383 | * Hardware semaphores. |
 * Very complex...
11385 | */ |
11386 | |
11387 | static int |
11388 | wm_get_null(struct wm_softc *sc) |
11389 | { |
11390 | |
11391 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11392 | device_xname(sc->sc_dev), __func__)); |
11393 | return 0; |
11394 | } |
11395 | |
11396 | static void |
11397 | wm_put_null(struct wm_softc *sc) |
11398 | { |
11399 | |
11400 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11401 | device_xname(sc->sc_dev), __func__)); |
11402 | return; |
11403 | } |
11404 | |
11405 | /* |
11406 | * Get hardware semaphore. |
11407 | * Same as e1000_get_hw_semaphore_generic() |
11408 | */ |
11409 | static int |
11410 | wm_get_swsm_semaphore(struct wm_softc *sc) |
11411 | { |
11412 | int32_t timeout; |
11413 | uint32_t swsm; |
11414 | |
11415 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11416 | device_xname(sc->sc_dev), __func__)); |
11417 | KASSERT(sc->sc_nvm_wordsize > 0); |
11418 | |
11419 | /* Get the SW semaphore. */ |
11420 | timeout = sc->sc_nvm_wordsize + 1; |
11421 | while (timeout) { |
11422 | swsm = CSR_READ(sc, WMREG_SWSM); |
11423 | |
11424 | if ((swsm & SWSM_SMBI) == 0) |
11425 | break; |
11426 | |
11427 | delay(50); |
11428 | timeout--; |
11429 | } |
11430 | |
11431 | if (timeout == 0) { |
11432 | aprint_error_dev(sc->sc_dev, |
11433 | "could not acquire SWSM SMBI\n" ); |
11434 | return 1; |
11435 | } |
11436 | |
11437 | /* Get the FW semaphore. */ |
11438 | timeout = sc->sc_nvm_wordsize + 1; |
11439 | while (timeout) { |
11440 | swsm = CSR_READ(sc, WMREG_SWSM); |
11441 | swsm |= SWSM_SWESMBI; |
11442 | CSR_WRITE(sc, WMREG_SWSM, swsm); |
11443 | /* If we managed to set the bit we got the semaphore. */ |
11444 | swsm = CSR_READ(sc, WMREG_SWSM); |
11445 | if (swsm & SWSM_SWESMBI) |
11446 | break; |
11447 | |
11448 | delay(50); |
11449 | timeout--; |
11450 | } |
11451 | |
11452 | if (timeout == 0) { |
11453 | aprint_error_dev(sc->sc_dev, |
11454 | "could not acquire SWSM SWESMBI\n" ); |
11455 | /* Release semaphores */ |
11456 | wm_put_swsm_semaphore(sc); |
11457 | return 1; |
11458 | } |
11459 | return 0; |
11460 | } |
11461 | |
11462 | /* |
11463 | * Put hardware semaphore. |
11464 | * Same as e1000_put_hw_semaphore_generic() |
11465 | */ |
11466 | static void |
11467 | wm_put_swsm_semaphore(struct wm_softc *sc) |
11468 | { |
11469 | uint32_t swsm; |
11470 | |
11471 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11472 | device_xname(sc->sc_dev), __func__)); |
11473 | |
11474 | swsm = CSR_READ(sc, WMREG_SWSM); |
11475 | swsm &= ~(SWSM_SMBI | SWSM_SWESMBI); |
11476 | CSR_WRITE(sc, WMREG_SWSM, swsm); |
11477 | } |
11478 | |
11479 | /* |
11480 | * Get SW/FW semaphore. |
11481 | * Same as e1000_acquire_swfw_sync_82575(). |
11482 | */ |
11483 | static int |
11484 | wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) |
11485 | { |
11486 | uint32_t swfw_sync; |
11487 | uint32_t swmask = mask << SWFW_SOFT_SHIFT; |
11488 | uint32_t fwmask = mask << SWFW_FIRM_SHIFT; |
11489 | int timeout = 200; |
11490 | |
11491 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11492 | device_xname(sc->sc_dev), __func__)); |
11493 | KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0); |
11494 | |
11495 | for (timeout = 0; timeout < 200; timeout++) { |
11496 | if (sc->sc_flags & WM_F_LOCK_SWSM) { |
11497 | if (wm_get_swsm_semaphore(sc)) { |
11498 | aprint_error_dev(sc->sc_dev, |
11499 | "%s: failed to get semaphore\n" , |
11500 | __func__); |
11501 | return 1; |
11502 | } |
11503 | } |
11504 | swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); |
11505 | if ((swfw_sync & (swmask | fwmask)) == 0) { |
11506 | swfw_sync |= swmask; |
11507 | CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); |
11508 | if (sc->sc_flags & WM_F_LOCK_SWSM) |
11509 | wm_put_swsm_semaphore(sc); |
11510 | return 0; |
11511 | } |
11512 | if (sc->sc_flags & WM_F_LOCK_SWSM) |
11513 | wm_put_swsm_semaphore(sc); |
11514 | delay(5000); |
11515 | } |
11516 | printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n" , |
11517 | device_xname(sc->sc_dev), mask, swfw_sync); |
11518 | return 1; |
11519 | } |
11520 | |
11521 | static void |
11522 | wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) |
11523 | { |
11524 | uint32_t swfw_sync; |
11525 | |
11526 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11527 | device_xname(sc->sc_dev), __func__)); |
11528 | KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0); |
11529 | |
11530 | if (sc->sc_flags & WM_F_LOCK_SWSM) { |
11531 | while (wm_get_swsm_semaphore(sc) != 0) |
11532 | continue; |
11533 | } |
11534 | swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); |
11535 | swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); |
11536 | CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); |
11537 | if (sc->sc_flags & WM_F_LOCK_SWSM) |
11538 | wm_put_swsm_semaphore(sc); |
11539 | } |
11540 | |
11541 | static int |
11542 | wm_get_phy_82575(struct wm_softc *sc) |
11543 | { |
11544 | |
11545 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11546 | device_xname(sc->sc_dev), __func__)); |
11547 | return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); |
11548 | } |
11549 | |
11550 | static void |
11551 | wm_put_phy_82575(struct wm_softc *sc) |
11552 | { |
11553 | |
11554 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11555 | device_xname(sc->sc_dev), __func__)); |
11556 | return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); |
11557 | } |
11558 | |
11559 | static int |
11560 | wm_get_swfwhw_semaphore(struct wm_softc *sc) |
11561 | { |
11562 | uint32_t ext_ctrl; |
11563 | int timeout = 200; |
11564 | |
11565 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11566 | device_xname(sc->sc_dev), __func__)); |
11567 | |
11568 | mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ |
11569 | for (timeout = 0; timeout < 200; timeout++) { |
11570 | ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); |
11571 | ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; |
11572 | CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); |
11573 | |
11574 | ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); |
11575 | if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) |
11576 | return 0; |
11577 | delay(5000); |
11578 | } |
11579 | printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n" , |
11580 | device_xname(sc->sc_dev), ext_ctrl); |
11581 | mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ |
11582 | return 1; |
11583 | } |
11584 | |
11585 | static void |
11586 | wm_put_swfwhw_semaphore(struct wm_softc *sc) |
11587 | { |
11588 | uint32_t ext_ctrl; |
11589 | |
11590 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11591 | device_xname(sc->sc_dev), __func__)); |
11592 | |
11593 | ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); |
11594 | ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; |
11595 | CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); |
11596 | |
11597 | mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ |
11598 | } |
11599 | |
11600 | static int |
11601 | wm_get_swflag_ich8lan(struct wm_softc *sc) |
11602 | { |
11603 | uint32_t ext_ctrl; |
11604 | int timeout; |
11605 | |
11606 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11607 | device_xname(sc->sc_dev), __func__)); |
11608 | mutex_enter(sc->sc_ich_phymtx); |
11609 | for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) { |
11610 | ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); |
11611 | if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0) |
11612 | break; |
11613 | delay(1000); |
11614 | } |
11615 | if (timeout >= WM_PHY_CFG_TIMEOUT) { |
11616 | printf("%s: SW has already locked the resource\n" , |
11617 | device_xname(sc->sc_dev)); |
11618 | goto out; |
11619 | } |
11620 | |
11621 | ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; |
11622 | CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); |
11623 | for (timeout = 0; timeout < 1000; timeout++) { |
11624 | ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); |
11625 | if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) |
11626 | break; |
11627 | delay(1000); |
11628 | } |
11629 | if (timeout >= 1000) { |
11630 | printf("%s: failed to acquire semaphore\n" , |
11631 | device_xname(sc->sc_dev)); |
11632 | ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; |
11633 | CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); |
11634 | goto out; |
11635 | } |
11636 | return 0; |
11637 | |
11638 | out: |
11639 | mutex_exit(sc->sc_ich_phymtx); |
11640 | return 1; |
11641 | } |
11642 | |
11643 | static void |
11644 | wm_put_swflag_ich8lan(struct wm_softc *sc) |
11645 | { |
11646 | uint32_t ext_ctrl; |
11647 | |
11648 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11649 | device_xname(sc->sc_dev), __func__)); |
11650 | ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); |
11651 | if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) { |
11652 | ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; |
11653 | CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); |
11654 | } else { |
11655 | printf("%s: Semaphore unexpectedly released\n" , |
11656 | device_xname(sc->sc_dev)); |
11657 | } |
11658 | |
11659 | mutex_exit(sc->sc_ich_phymtx); |
11660 | } |
11661 | |
11662 | static int |
11663 | wm_get_nvm_ich8lan(struct wm_softc *sc) |
11664 | { |
11665 | |
11666 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11667 | device_xname(sc->sc_dev), __func__)); |
11668 | mutex_enter(sc->sc_ich_nvmmtx); |
11669 | |
11670 | return 0; |
11671 | } |
11672 | |
11673 | static void |
11674 | wm_put_nvm_ich8lan(struct wm_softc *sc) |
11675 | { |
11676 | |
11677 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11678 | device_xname(sc->sc_dev), __func__)); |
11679 | mutex_exit(sc->sc_ich_nvmmtx); |
11680 | } |
11681 | |
11682 | static int |
11683 | wm_get_hw_semaphore_82573(struct wm_softc *sc) |
11684 | { |
11685 | int i = 0; |
11686 | uint32_t reg; |
11687 | |
11688 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11689 | device_xname(sc->sc_dev), __func__)); |
11690 | |
11691 | reg = CSR_READ(sc, WMREG_EXTCNFCTR); |
11692 | do { |
11693 | CSR_WRITE(sc, WMREG_EXTCNFCTR, |
11694 | reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); |
11695 | reg = CSR_READ(sc, WMREG_EXTCNFCTR); |
11696 | if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) |
11697 | break; |
11698 | delay(2*1000); |
11699 | i++; |
11700 | } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); |
11701 | |
11702 | if (i == WM_MDIO_OWNERSHIP_TIMEOUT) { |
11703 | wm_put_hw_semaphore_82573(sc); |
11704 | log(LOG_ERR, "%s: Driver can't access the PHY\n" , |
11705 | device_xname(sc->sc_dev)); |
11706 | return -1; |
11707 | } |
11708 | |
11709 | return 0; |
11710 | } |
11711 | |
11712 | static void |
11713 | wm_put_hw_semaphore_82573(struct wm_softc *sc) |
11714 | { |
11715 | uint32_t reg; |
11716 | |
11717 | DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n" , |
11718 | device_xname(sc->sc_dev), __func__)); |
11719 | |
11720 | reg = CSR_READ(sc, WMREG_EXTCNFCTR); |
11721 | reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; |
11722 | CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); |
11723 | } |
11724 | |
11725 | /* |
11726 | * Management mode and power management related subroutines. |
11727 | * BMC, AMT, suspend/resume and EEE. |
11728 | */ |
11729 | |
11730 | #ifdef WM_WOL |
11731 | static int |
11732 | wm_check_mng_mode(struct wm_softc *sc) |
11733 | { |
11734 | int rv; |
11735 | |
11736 | switch (sc->sc_type) { |
11737 | case WM_T_ICH8: |
11738 | case WM_T_ICH9: |
11739 | case WM_T_ICH10: |
11740 | case WM_T_PCH: |
11741 | case WM_T_PCH2: |
11742 | case WM_T_PCH_LPT: |
11743 | case WM_T_PCH_SPT: |
11744 | rv = wm_check_mng_mode_ich8lan(sc); |
11745 | break; |
11746 | case WM_T_82574: |
11747 | case WM_T_82583: |
11748 | rv = wm_check_mng_mode_82574(sc); |
11749 | break; |
11750 | case WM_T_82571: |
11751 | case WM_T_82572: |
11752 | case WM_T_82573: |
11753 | case WM_T_80003: |
11754 | rv = wm_check_mng_mode_generic(sc); |
11755 | break; |
11756 | default: |
/* nothing to do */
11758 | rv = 0; |
11759 | break; |
11760 | } |
11761 | |
11762 | return rv; |
11763 | } |
11764 | |
11765 | static int |
11766 | wm_check_mng_mode_ich8lan(struct wm_softc *sc) |
11767 | { |
11768 | uint32_t fwsm; |
11769 | |
11770 | fwsm = CSR_READ(sc, WMREG_FWSM); |
11771 | |
11772 | if (((fwsm & FWSM_FW_VALID) != 0) |
11773 | && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) |
11774 | return 1; |
11775 | |
11776 | return 0; |
11777 | } |
11778 | |
11779 | static int |
11780 | wm_check_mng_mode_82574(struct wm_softc *sc) |
11781 | { |
11782 | uint16_t data; |
11783 | |
11784 | wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); |
11785 | |
11786 | if ((data & NVM_CFG2_MNGM_MASK) != 0) |
11787 | return 1; |
11788 | |
11789 | return 0; |
11790 | } |
11791 | |
11792 | static int |
11793 | wm_check_mng_mode_generic(struct wm_softc *sc) |
11794 | { |
11795 | uint32_t fwsm; |
11796 | |
11797 | fwsm = CSR_READ(sc, WMREG_FWSM); |
11798 | |
11799 | if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE) |
11800 | return 1; |
11801 | |
11802 | return 0; |
11803 | } |
11804 | #endif /* WM_WOL */ |
11805 | |
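/*
 * Summary of the decision below (derived from the code): pass-through
 * requires ASF firmware present and MANC_RECV_TCO_EN set, and then one
 * of the following must hold:
 *  - ARC subsystem valid: FACTPS_MNGCG clear and the FWSM mode is
 *    MNG_ICH_IAMT_MODE;
 *  - 82574/82583: FACTPS_MNGCG clear and the NVM CFG2 MNGM field
 *    selects pass-through;
 *  - otherwise: MANC_SMBUS_EN set and MANC_ASF_EN clear.
 */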
11806 | static int |
11807 | wm_enable_mng_pass_thru(struct wm_softc *sc) |
11808 | { |
11809 | uint32_t manc, fwsm, factps; |
11810 | |
11811 | if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0) |
11812 | return 0; |
11813 | |
11814 | manc = CSR_READ(sc, WMREG_MANC); |
11815 | |
DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
device_xname(sc->sc_dev), manc));
11818 | if ((manc & MANC_RECV_TCO_EN) == 0) |
11819 | return 0; |
11820 | |
11821 | if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) { |
11822 | fwsm = CSR_READ(sc, WMREG_FWSM); |
11823 | factps = CSR_READ(sc, WMREG_FACTPS); |
11824 | if (((factps & FACTPS_MNGCG) == 0) |
11825 | && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) |
11826 | return 1; |
} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11828 | uint16_t data; |
11829 | |
11830 | factps = CSR_READ(sc, WMREG_FACTPS); |
11831 | wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); |
DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
device_xname(sc->sc_dev), factps, data));
11834 | if (((factps & FACTPS_MNGCG) == 0) |
11835 | && ((data & NVM_CFG2_MNGM_MASK) |
11836 | == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT))) |
11837 | return 1; |
11838 | } else if (((manc & MANC_SMBUS_EN) != 0) |
11839 | && ((manc & MANC_ASF_EN) == 0)) |
11840 | return 1; |
11841 | |
11842 | return 0; |
11843 | } |
11844 | |
11845 | static bool |
11846 | wm_phy_resetisblocked(struct wm_softc *sc) |
11847 | { |
11848 | bool blocked = false; |
11849 | uint32_t reg; |
11850 | int i = 0; |
11851 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
11854 | |
11855 | switch (sc->sc_type) { |
11856 | case WM_T_ICH8: |
11857 | case WM_T_ICH9: |
11858 | case WM_T_ICH10: |
11859 | case WM_T_PCH: |
11860 | case WM_T_PCH2: |
11861 | case WM_T_PCH_LPT: |
11862 | case WM_T_PCH_SPT: |
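/* Poll FWSM up to 30 times with a 10 ms delay (about 300 ms total). */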
11863 | do { |
11864 | reg = CSR_READ(sc, WMREG_FWSM); |
11865 | if ((reg & FWSM_RSPCIPHY) == 0) { |
11866 | blocked = true; |
11867 | delay(10*1000); |
11868 | continue; |
11869 | } |
11870 | blocked = false; |
11871 | } while (blocked && (i++ < 30)); |
return blocked;
11874 | case WM_T_82571: |
11875 | case WM_T_82572: |
11876 | case WM_T_82573: |
11877 | case WM_T_82574: |
11878 | case WM_T_82583: |
11879 | case WM_T_80003: |
11880 | reg = CSR_READ(sc, WMREG_MANC); |
11881 | if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0) |
11882 | return true; |
11883 | else |
11884 | return false; |
11886 | default: |
11887 | /* no problem */ |
11888 | break; |
11889 | } |
11890 | |
11891 | return false; |
11892 | } |
11893 | |
11894 | static void |
11895 | wm_get_hw_control(struct wm_softc *sc) |
11896 | { |
11897 | uint32_t reg; |
11898 | |
DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
11901 | |
11902 | if (sc->sc_type == WM_T_82573) { |
11903 | reg = CSR_READ(sc, WMREG_SWSM); |
11904 | CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD); |
11905 | } else if (sc->sc_type >= WM_T_82571) { |
11906 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
11907 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD); |
11908 | } |
11909 | } |
11910 | |
11911 | static void |
11912 | wm_release_hw_control(struct wm_softc *sc) |
11913 | { |
11914 | uint32_t reg; |
11915 | |
DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
11918 | |
11919 | if (sc->sc_type == WM_T_82573) { |
11920 | reg = CSR_READ(sc, WMREG_SWSM); |
11921 | CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD); |
11922 | } else if (sc->sc_type >= WM_T_82571) { |
11923 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
11924 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD); |
11925 | } |
11926 | } |
11927 | |
11928 | static void |
11929 | wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate) |
11930 | { |
11931 | uint32_t reg; |
11932 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
11935 | |
11936 | if (sc->sc_type < WM_T_PCH2) |
11937 | return; |
11938 | |
11939 | reg = CSR_READ(sc, WMREG_EXTCNFCTR); |
11940 | |
11941 | if (gate) |
11942 | reg |= EXTCNFCTR_GATE_PHY_CFG; |
11943 | else |
11944 | reg &= ~EXTCNFCTR_GATE_PHY_CFG; |
11945 | |
11946 | CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); |
11947 | } |
11948 | |
11949 | static void |
11950 | wm_smbustopci(struct wm_softc *sc) |
11951 | { |
11952 | uint32_t fwsm, reg; |
11953 | int rv = 0; |
11954 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
11957 | |
11958 | /* Gate automatic PHY configuration by hardware on non-managed 82579 */ |
11959 | wm_gate_hw_phy_config_ich8lan(sc, true); |
11960 | |
11961 | /* Disable ULP */ |
11962 | wm_ulp_disable(sc); |
11963 | |
11964 | /* Acquire PHY semaphore */ |
11965 | sc->phy.acquire(sc); |
11966 | |
11967 | fwsm = CSR_READ(sc, WMREG_FWSM); |
11968 | switch (sc->sc_type) { |
11969 | case WM_T_PCH_LPT: |
11970 | case WM_T_PCH_SPT: |
11971 | if (wm_phy_is_accessible_pchlan(sc)) |
11972 | break; |
11973 | |
11974 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
11975 | reg |= CTRL_EXT_FORCE_SMBUS; |
11976 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
11977 | #if 0 |
11978 | /* XXX Isn't this required??? */ |
11979 | CSR_WRITE_FLUSH(sc); |
11980 | #endif |
11981 | delay(50 * 1000); |
11982 | /* FALLTHROUGH */ |
11983 | case WM_T_PCH2: |
11984 | if (wm_phy_is_accessible_pchlan(sc) == true) |
11985 | break; |
11986 | /* FALLTHROUGH */ |
11987 | case WM_T_PCH: |
if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
break;
11991 | |
11992 | if (wm_phy_resetisblocked(sc) == true) { |
printf("XXX reset is blocked(3)\n");
11994 | break; |
11995 | } |
11996 | |
11997 | wm_toggle_lanphypc_pch_lpt(sc); |
11998 | |
11999 | if (sc->sc_type >= WM_T_PCH_LPT) { |
12000 | if (wm_phy_is_accessible_pchlan(sc) == true) |
12001 | break; |
12002 | |
12003 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
12004 | reg &= ~CTRL_EXT_FORCE_SMBUS; |
12005 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
12006 | |
12007 | if (wm_phy_is_accessible_pchlan(sc) == true) |
12008 | break; |
12009 | rv = -1; |
12010 | } |
12011 | break; |
12012 | default: |
12013 | break; |
12014 | } |
12015 | |
12016 | /* Release semaphore */ |
12017 | sc->phy.release(sc); |
12018 | |
12019 | if (rv == 0) { |
12020 | if (wm_phy_resetisblocked(sc)) { |
printf("XXX reset is blocked(4)\n");
12022 | goto out; |
12023 | } |
12024 | wm_reset_phy(sc); |
12025 | if (wm_phy_resetisblocked(sc)) |
printf("XXX reset is blocked(4)\n");
12027 | } |
12028 | |
12029 | out: |
12030 | /* |
12031 | * Ungate automatic PHY configuration by hardware on non-managed 82579 |
12032 | */ |
12033 | if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) { |
12034 | delay(10*1000); |
12035 | wm_gate_hw_phy_config_ich8lan(sc, false); |
12036 | } |
12037 | } |
12038 | |
12039 | static void |
12040 | wm_init_manageability(struct wm_softc *sc) |
12041 | { |
12042 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12045 | if (sc->sc_flags & WM_F_HAS_MANAGE) { |
12046 | uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H); |
12047 | uint32_t manc = CSR_READ(sc, WMREG_MANC); |
12048 | |
12049 | /* Disable hardware interception of ARP */ |
12050 | manc &= ~MANC_ARP_EN; |
12051 | |
12052 | /* Enable receiving management packets to the host */ |
12053 | if (sc->sc_type >= WM_T_82571) { |
12054 | manc |= MANC_EN_MNG2HOST; |
manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
12056 | CSR_WRITE(sc, WMREG_MANC2H, manc2h); |
12057 | } |
12058 | |
12059 | CSR_WRITE(sc, WMREG_MANC, manc); |
12060 | } |
12061 | } |
12062 | |
12063 | static void |
12064 | wm_release_manageability(struct wm_softc *sc) |
12065 | { |
12066 | |
12067 | if (sc->sc_flags & WM_F_HAS_MANAGE) { |
12068 | uint32_t manc = CSR_READ(sc, WMREG_MANC); |
12069 | |
12070 | manc |= MANC_ARP_EN; |
12071 | if (sc->sc_type >= WM_T_82571) |
12072 | manc &= ~MANC_EN_MNG2HOST; |
12073 | |
12074 | CSR_WRITE(sc, WMREG_MANC, manc); |
12075 | } |
12076 | } |
12077 | |
12078 | static void |
12079 | wm_get_wakeup(struct wm_softc *sc) |
12080 | { |
12081 | |
12082 | /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */ |
12083 | switch (sc->sc_type) { |
12084 | case WM_T_82573: |
12085 | case WM_T_82583: |
12086 | sc->sc_flags |= WM_F_HAS_AMT; |
12087 | /* FALLTHROUGH */ |
12088 | case WM_T_80003: |
12089 | case WM_T_82575: |
12090 | case WM_T_82576: |
12091 | case WM_T_82580: |
12092 | case WM_T_I350: |
12093 | case WM_T_I354: |
12094 | if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0) |
12095 | sc->sc_flags |= WM_F_ARC_SUBSYS_VALID; |
12096 | /* FALLTHROUGH */ |
12097 | case WM_T_82541: |
12098 | case WM_T_82541_2: |
12099 | case WM_T_82547: |
12100 | case WM_T_82547_2: |
12101 | case WM_T_82571: |
12102 | case WM_T_82572: |
12103 | case WM_T_82574: |
12104 | sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; |
12105 | break; |
12106 | case WM_T_ICH8: |
12107 | case WM_T_ICH9: |
12108 | case WM_T_ICH10: |
12109 | case WM_T_PCH: |
12110 | case WM_T_PCH2: |
12111 | case WM_T_PCH_LPT: |
12112 | case WM_T_PCH_SPT: |
12113 | sc->sc_flags |= WM_F_HAS_AMT; |
12114 | sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; |
12115 | break; |
12116 | default: |
12117 | break; |
12118 | } |
12119 | |
12120 | /* 1: HAS_MANAGE */ |
12121 | if (wm_enable_mng_pass_thru(sc) != 0) |
12122 | sc->sc_flags |= WM_F_HAS_MANAGE; |
12123 | |
12124 | #ifdef WM_DEBUG |
printf("\n");
if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
printf("HAS_AMT,");
if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
printf("ARC_SUBSYS_VALID,");
if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
printf("ASF_FIRMWARE_PRES,");
if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
printf("HAS_MANAGE,");
printf("\n");
12135 | #endif |
/*
 * Note that the WOL flags are set after the resetting of the EEPROM
 * stuff.
 */
12140 | } |
12141 | |
/*
 * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer; the early I217/I218 variants listed
 * below are excluded.
 */
12146 | static void |
12147 | wm_ulp_disable(struct wm_softc *sc) |
12148 | { |
12149 | uint32_t reg; |
12150 | int i = 0; |
12151 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12154 | /* Exclude old devices */ |
12155 | if ((sc->sc_type < WM_T_PCH_LPT) |
12156 | || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM) |
12157 | || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V) |
12158 | || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2) |
12159 | || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2)) |
12160 | return; |
12161 | |
12162 | if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) { |
12163 | /* Request ME un-configure ULP mode in the PHY */ |
12164 | reg = CSR_READ(sc, WMREG_H2ME); |
12165 | reg &= ~H2ME_ULP; |
12166 | reg |= H2ME_ENFORCE_SETTINGS; |
12167 | CSR_WRITE(sc, WMREG_H2ME, reg); |
12168 | |
12169 | /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */ |
12170 | while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) { |
12171 | if (i++ == 30) { |
printf("%s timed out\n", __func__);
12173 | return; |
12174 | } |
12175 | delay(10 * 1000); |
12176 | } |
12177 | reg = CSR_READ(sc, WMREG_H2ME); |
12178 | reg &= ~H2ME_ENFORCE_SETTINGS; |
12179 | CSR_WRITE(sc, WMREG_H2ME, reg); |
12180 | |
12181 | return; |
12182 | } |
12183 | |
12184 | /* Acquire semaphore */ |
12185 | sc->phy.acquire(sc); |
12186 | |
12187 | /* Toggle LANPHYPC */ |
12188 | wm_toggle_lanphypc_pch_lpt(sc); |
12189 | |
12190 | /* Unforce SMBus mode in PHY */ |
12191 | reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL); |
12192 | if (reg == 0x0000 || reg == 0xffff) { |
12193 | uint32_t reg2; |
12194 | |
printf("%s: Force SMBus first.\n", __func__);
12196 | reg2 = CSR_READ(sc, WMREG_CTRL_EXT); |
12197 | reg2 |= CTRL_EXT_FORCE_SMBUS; |
12198 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg2); |
12199 | delay(50 * 1000); |
12200 | |
12201 | reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL); |
12202 | } |
12203 | reg &= ~CV_SMB_CTRL_FORCE_SMBUS; |
12204 | wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg); |
12205 | |
12206 | /* Unforce SMBus mode in MAC */ |
12207 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
12208 | reg &= ~CTRL_EXT_FORCE_SMBUS; |
12209 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
12210 | |
12211 | reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL); |
12212 | reg |= HV_PM_CTRL_K1_ENA; |
12213 | wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg); |
12214 | |
12215 | reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1); |
12216 | reg &= ~(I218_ULP_CONFIG1_IND |
12217 | | I218_ULP_CONFIG1_STICKY_ULP |
12218 | | I218_ULP_CONFIG1_RESET_TO_SMBUS |
12219 | | I218_ULP_CONFIG1_WOL_HOST |
12220 | | I218_ULP_CONFIG1_INBAND_EXIT |
12221 | | I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
12222 | | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
12223 | | I218_ULP_CONFIG1_DIS_SMB_PERST); |
12224 | wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg); |
12225 | reg |= I218_ULP_CONFIG1_START; |
12226 | wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg); |
12227 | |
12228 | reg = CSR_READ(sc, WMREG_FEXTNVM7); |
12229 | reg &= ~FEXTNVM7_DIS_SMB_PERST; |
12230 | CSR_WRITE(sc, WMREG_FEXTNVM7, reg); |
12231 | |
12232 | /* Release semaphore */ |
12233 | sc->phy.release(sc); |
12234 | wm_gmii_reset(sc); |
12235 | delay(50 * 1000); |
12236 | } |
12237 | |
12238 | /* WOL in the newer chipset interfaces (pchlan) */ |
12239 | static void |
12240 | wm_enable_phy_wakeup(struct wm_softc *sc) |
12241 | { |
12242 | #if 0 |
12243 | uint16_t preg; |
12244 | |
12245 | /* Copy MAC RARs to PHY RARs */ |
12246 | |
12247 | /* Copy MAC MTA to PHY MTA */ |
12248 | |
12249 | /* Configure PHY Rx Control register */ |
12250 | |
12251 | /* Enable PHY wakeup in MAC register */ |
12252 | |
12253 | /* Configure and enable PHY wakeup in PHY registers */ |
12254 | |
12255 | /* Activate PHY wakeup */ |
12256 | |
12257 | /* XXX */ |
12258 | #endif |
12259 | } |
12260 | |
12261 | /* Power down workaround on D3 */ |
12262 | static void |
12263 | wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc) |
12264 | { |
12265 | uint32_t reg; |
12266 | int i; |
12267 | |
12268 | for (i = 0; i < 2; i++) { |
12269 | /* Disable link */ |
12270 | reg = CSR_READ(sc, WMREG_PHY_CTRL); |
12271 | reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; |
12272 | CSR_WRITE(sc, WMREG_PHY_CTRL, reg); |
12273 | |
12274 | /* |
12275 | * Call gig speed drop workaround on Gig disable before |
12276 | * accessing any PHY registers |
12277 | */ |
12278 | if (sc->sc_type == WM_T_ICH8) |
12279 | wm_gig_downshift_workaround_ich8lan(sc); |
12280 | |
12281 | /* Write VR power-down enable */ |
12282 | reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL); |
12283 | reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; |
12284 | reg |= IGP3_VR_CTRL_MODE_SHUTDOWN; |
12285 | sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg); |
12286 | |
12287 | /* Read it back and test */ |
12288 | reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL); |
12289 | reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; |
12290 | if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0)) |
12291 | break; |
12292 | |
12293 | /* Issue PHY reset and repeat at most one more time */ |
12294 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); |
12295 | } |
12296 | } |
12297 | |
12298 | static void |
12299 | wm_enable_wakeup(struct wm_softc *sc) |
12300 | { |
12301 | uint32_t reg, pmreg; |
12302 | pcireg_t pmode; |
12303 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12306 | |
12307 | if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, |
12308 | &pmreg, NULL) == 0) |
12309 | return; |
12310 | |
12311 | /* Advertise the wakeup capability */ |
12312 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2) |
12313 | | CTRL_SWDPIN(3)); |
12314 | CSR_WRITE(sc, WMREG_WUC, WUC_APME); |
12315 | |
12316 | /* ICH workaround */ |
12317 | switch (sc->sc_type) { |
12318 | case WM_T_ICH8: |
12319 | case WM_T_ICH9: |
12320 | case WM_T_ICH10: |
12321 | case WM_T_PCH: |
12322 | case WM_T_PCH2: |
12323 | case WM_T_PCH_LPT: |
12324 | case WM_T_PCH_SPT: |
12325 | /* Disable gig during WOL */ |
12326 | reg = CSR_READ(sc, WMREG_PHY_CTRL); |
12327 | reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS; |
12328 | CSR_WRITE(sc, WMREG_PHY_CTRL, reg); |
12329 | if (sc->sc_type == WM_T_PCH) |
12330 | wm_gmii_reset(sc); |
12331 | |
12332 | /* Power down workaround */ |
12333 | if (sc->sc_phytype == WMPHY_82577) { |
12334 | struct mii_softc *child; |
12335 | |
12336 | /* Assume that the PHY is copper */ |
12337 | child = LIST_FIRST(&sc->sc_mii.mii_phys); |
12338 | if (child->mii_mpd_rev <= 2) |
12339 | sc->sc_mii.mii_writereg(sc->sc_dev, 1, |
(768 << 5) | 25, 0x0444); /* page 768, reg 25: magic value */
12341 | } |
12342 | break; |
12343 | default: |
12344 | break; |
12345 | } |
12346 | |
12347 | /* Keep the laser running on fiber adapters */ |
12348 | if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER) |
12349 | || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) { |
12350 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
12351 | reg |= CTRL_EXT_SWDPIN(3); |
12352 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
12353 | } |
12354 | |
12355 | reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG; |
12356 | #if 0 /* for the multicast packet */ |
12357 | reg |= WUFC_MC; |
12358 | CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE); |
12359 | #endif |
12360 | |
12361 | if (sc->sc_type >= WM_T_PCH) |
12362 | wm_enable_phy_wakeup(sc); |
12363 | else { |
12364 | CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN); |
12365 | CSR_WRITE(sc, WMREG_WUFC, reg); |
12366 | } |
12367 | |
12368 | if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
12369 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
12370 | || (sc->sc_type == WM_T_PCH2)) |
12371 | && (sc->sc_phytype == WMPHY_IGP_3)) |
12372 | wm_igp3_phy_powerdown_workaround_ich8lan(sc); |
12373 | |
12374 | /* Request PME */ |
12375 | pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR); |
12376 | #if 0 |
12377 | /* Disable WOL */ |
12378 | pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN); |
12379 | #else |
12380 | /* For WOL */ |
12381 | pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN; |
12382 | #endif |
12383 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode); |
12384 | } |
12385 | |
12386 | /* LPLU */ |
12387 | |
12388 | static void |
12389 | wm_lplu_d0_disable(struct wm_softc *sc) |
12390 | { |
12391 | uint32_t reg; |
12392 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12395 | |
12396 | reg = CSR_READ(sc, WMREG_PHY_CTRL); |
12397 | reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU); |
12398 | CSR_WRITE(sc, WMREG_PHY_CTRL, reg); |
12399 | } |
12400 | |
12401 | static void |
12402 | wm_lplu_d0_disable_pch(struct wm_softc *sc) |
12403 | { |
12404 | uint32_t reg; |
12405 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12408 | |
12409 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); |
12410 | reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); |
12411 | reg |= HV_OEM_BITS_ANEGNOW; |
12412 | wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); |
12413 | } |
12414 | |
12415 | /* EEE */ |
12416 | |
12417 | static void |
12418 | wm_set_eee_i350(struct wm_softc *sc) |
12419 | { |
12420 | uint32_t ipcnfg, eeer; |
12421 | |
12422 | ipcnfg = CSR_READ(sc, WMREG_IPCNFG); |
12423 | eeer = CSR_READ(sc, WMREG_EEER); |
12424 | |
12425 | if ((sc->sc_flags & WM_F_EEE) != 0) { |
12426 | ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); |
12427 | eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN |
12428 | | EEER_LPI_FC); |
12429 | } else { |
12430 | ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); |
12431 | ipcnfg &= ~IPCNFG_10BASE_TE; |
12432 | eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN |
12433 | | EEER_LPI_FC); |
12434 | } |
12435 | |
12436 | CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg); |
12437 | CSR_WRITE(sc, WMREG_EEER, eeer); |
12438 | CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */ |
12439 | CSR_READ(sc, WMREG_EEER); /* XXX flush? */ |
12440 | } |
12441 | |
/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
12446 | |
12447 | /* Work-around for 82566 Kumeran PCS lock loss */ |
12448 | static void |
12449 | wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc) |
12450 | { |
12451 | #if 0 |
12452 | int miistatus, active, i; |
12453 | int reg; |
12454 | |
12455 | miistatus = sc->sc_mii.mii_media_status; |
12456 | |
12457 | /* If the link is not up, do nothing */ |
12458 | if ((miistatus & IFM_ACTIVE) == 0) |
12459 | return; |
12460 | |
12461 | active = sc->sc_mii.mii_media_active; |
12462 | |
12463 | /* Nothing to do if the link is other than 1Gbps */ |
12464 | if (IFM_SUBTYPE(active) != IFM_1000_T) |
12465 | return; |
12466 | |
12467 | for (i = 0; i < 10; i++) { |
12468 | /* read twice */ |
12469 | reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); |
12470 | reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); |
12471 | if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0) |
12472 | goto out; /* GOOD! */ |
12473 | |
12474 | /* Reset the PHY */ |
12475 | wm_gmii_reset(sc); |
12476 | delay(5*1000); |
12477 | } |
12478 | |
12479 | /* Disable GigE link negotiation */ |
12480 | reg = CSR_READ(sc, WMREG_PHY_CTRL); |
12481 | reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; |
12482 | CSR_WRITE(sc, WMREG_PHY_CTRL, reg); |
12483 | |
12484 | /* |
12485 | * Call gig speed drop workaround on Gig disable before accessing |
12486 | * any PHY registers. |
12487 | */ |
12488 | wm_gig_downshift_workaround_ich8lan(sc); |
12489 | |
12490 | out: |
12491 | return; |
12492 | #endif |
12493 | } |
12494 | |
12495 | /* WOL from S5 stops working */ |
12496 | static void |
12497 | wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc) |
12498 | { |
12499 | uint16_t kmrn_reg; |
12500 | |
12501 | /* Only for igp3 */ |
12502 | if (sc->sc_phytype == WMPHY_IGP_3) { |
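/* Pulse the Kumeran near-end loopback bit: set it, then clear it. */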
12503 | kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG); |
12504 | kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK; |
12505 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); |
12506 | kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK; |
12507 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); |
12508 | } |
12509 | } |
12510 | |
12511 | /* |
12512 | * Workaround for pch's PHYs |
12513 | * XXX should be moved to new PHY driver? |
12514 | */ |
12515 | static void |
12516 | wm_hv_phy_workaround_ich8lan(struct wm_softc *sc) |
12517 | { |
12518 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12521 | KASSERT(sc->sc_type == WM_T_PCH); |
12522 | |
12523 | if (sc->sc_phytype == WMPHY_82577) |
12524 | wm_set_mdio_slow_mode_hv(sc); |
12525 | |
12526 | /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */ |
12527 | |
12528 | /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/ |
12529 | |
12530 | /* 82578 */ |
12531 | if (sc->sc_phytype == WMPHY_82578) { |
12532 | struct mii_softc *child; |
12533 | |
12534 | /* |
12535 | * Return registers to default by doing a soft reset then |
12536 | * writing 0x3140 to the control register |
12537 | * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 |
12538 | */ |
12539 | child = LIST_FIRST(&sc->sc_mii.mii_phys); |
12540 | if ((child != NULL) && (child->mii_mpd_rev < 2)) { |
12541 | PHY_RESET(child); |
12542 | sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR, |
12543 | 0x3140); |
12544 | } |
12545 | } |
12546 | |
12547 | /* Select page 0 */ |
12548 | sc->phy.acquire(sc); |
12549 | wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0); |
12550 | sc->phy.release(sc); |
12551 | |
/*
 * Configure the K1 Si workaround during PHY reset, assuming there is
 * link, so that K1 is disabled when the link runs at 1Gbps.
 */
12556 | wm_k1_gig_workaround_hv(sc, 1); |
12557 | } |
12558 | |
12559 | static void |
12560 | wm_lv_phy_workaround_ich8lan(struct wm_softc *sc) |
12561 | { |
12562 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12565 | KASSERT(sc->sc_type == WM_T_PCH2); |
12566 | |
12567 | wm_set_mdio_slow_mode_hv(sc); |
12568 | } |
12569 | |
12570 | static int |
12571 | wm_k1_gig_workaround_hv(struct wm_softc *sc, int link) |
12572 | { |
12573 | int k1_enable = sc->sc_nvm_k1_enabled; |
12574 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12577 | |
12578 | if (sc->phy.acquire(sc) != 0) |
12579 | return -1; |
12580 | |
12581 | if (link) { |
12582 | k1_enable = 0; |
12583 | |
12584 | /* Link stall fix for link up */ |
12585 | wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100); |
12586 | } else { |
12587 | /* Link stall fix for link down */ |
12588 | wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100); |
12589 | } |
12590 | |
12591 | wm_configure_k1_ich8lan(sc, k1_enable); |
12592 | sc->phy.release(sc); |
12593 | |
12594 | return 0; |
12595 | } |
12596 | |
12597 | static void |
12598 | wm_set_mdio_slow_mode_hv(struct wm_softc *sc) |
12599 | { |
12600 | uint32_t reg; |
12601 | |
12602 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL); |
12603 | wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, |
12604 | reg | HV_KMRN_MDIO_SLOW); |
12605 | } |
12606 | |
12607 | static void |
12608 | wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable) |
12609 | { |
12610 | uint32_t ctrl, ctrl_ext, tmp; |
12611 | uint16_t kmrn_reg; |
12612 | |
12613 | kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG); |
12614 | |
12615 | if (k1_enable) |
12616 | kmrn_reg |= KUMCTRLSTA_K1_ENABLE; |
12617 | else |
12618 | kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE; |
12619 | |
12620 | wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg); |
12621 | |
12622 | delay(20); |
12623 | |
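/*
 * Briefly force the MAC speed setting (CTRL_FRCSPD plus
 * CTRL_EXT_SPD_BYPS), then restore the original CTRL/CTRL_EXT values;
 * this appears to be what makes the new K1 setting take effect.
 */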
12624 | ctrl = CSR_READ(sc, WMREG_CTRL); |
12625 | ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); |
12626 | |
12627 | tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100); |
12628 | tmp |= CTRL_FRCSPD; |
12629 | |
12630 | CSR_WRITE(sc, WMREG_CTRL, tmp); |
12631 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS); |
12632 | CSR_WRITE_FLUSH(sc); |
12633 | delay(20); |
12634 | |
12635 | CSR_WRITE(sc, WMREG_CTRL, ctrl); |
12636 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); |
12637 | CSR_WRITE_FLUSH(sc); |
12638 | delay(20); |
12639 | } |
12640 | |
12641 | /* special case - for 82575 - need to do manual init ... */ |
12642 | static void |
12643 | wm_reset_init_script_82575(struct wm_softc *sc) |
12644 | { |
/*
 * Remark: this is untested code - we have no board without EEPROM.
 * Same setup as mentioned in the FreeBSD driver for the i82575.
 */
12649 | |
12650 | /* SerDes configuration via SERDESCTRL */ |
12651 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c); |
12652 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78); |
12653 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23); |
12654 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15); |
12655 | |
12656 | /* CCM configuration via CCMCTL register */ |
12657 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00); |
12658 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00); |
12659 | |
12660 | /* PCIe lanes configuration */ |
12661 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec); |
12662 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf); |
12663 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05); |
12664 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81); |
12665 | |
12666 | /* PCIe PLL Configuration */ |
12667 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47); |
12668 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00); |
12669 | wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00); |
12670 | } |
12671 | |
12672 | static void |
12673 | wm_reset_mdicnfg_82580(struct wm_softc *sc) |
12674 | { |
12675 | uint32_t reg; |
12676 | uint16_t nvmword; |
12677 | int rv; |
12678 | |
12679 | if ((sc->sc_flags & WM_F_SGMII) == 0) |
12680 | return; |
12681 | |
12682 | rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) |
12683 | + NVM_OFF_CFG3_PORTA, 1, &nvmword); |
12684 | if (rv != 0) { |
aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
__func__);
12687 | return; |
12688 | } |
12689 | |
12690 | reg = CSR_READ(sc, WMREG_MDICNFG); |
12691 | if (nvmword & NVM_CFG3_PORTA_EXT_MDIO) |
12692 | reg |= MDICNFG_DEST; |
12693 | if (nvmword & NVM_CFG3_PORTA_COM_MDIO) |
12694 | reg |= MDICNFG_COM_MDIO; |
12695 | CSR_WRITE(sc, WMREG_MDICNFG, reg); |
12696 | } |
12697 | |
12698 | #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff)) |
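/*
 * 0x0000 and 0xffff are what a read of an absent or unresponsive PHY
 * typically returns.
 */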
12699 | |
12700 | static bool |
12701 | wm_phy_is_accessible_pchlan(struct wm_softc *sc) |
12702 | { |
12703 | int i; |
12704 | uint32_t reg; |
12705 | uint16_t id1, id2; |
12706 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12709 | id1 = id2 = 0xffff; |
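/*
 * Try the ID registers up to twice; a failed MDIO read shows up as
 * 0x0000 or 0xffff.
 */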
12710 | for (i = 0; i < 2; i++) { |
12711 | id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1); |
12712 | if (MII_INVALIDID(id1)) |
12713 | continue; |
12714 | id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2); |
12715 | if (MII_INVALIDID(id2)) |
12716 | continue; |
12717 | break; |
12718 | } |
if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
goto out;
12722 | |
12723 | if (sc->sc_type < WM_T_PCH_LPT) { |
12724 | sc->phy.release(sc); |
12725 | wm_set_mdio_slow_mode_hv(sc); |
12726 | id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1); |
12727 | id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2); |
12728 | sc->phy.acquire(sc); |
12729 | } |
12730 | if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) { |
printf("XXX return with false\n");
12732 | return false; |
12733 | } |
12734 | out: |
12735 | if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) { |
12736 | /* Only unforce SMBus if ME is not active */ |
12737 | if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { |
12738 | /* Unforce SMBus mode in PHY */ |
12739 | reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, |
12740 | CV_SMB_CTRL); |
12741 | reg &= ~CV_SMB_CTRL_FORCE_SMBUS; |
12742 | wm_gmii_hv_writereg_locked(sc->sc_dev, 2, |
12743 | CV_SMB_CTRL, reg); |
12744 | |
12745 | /* Unforce SMBus mode in MAC */ |
12746 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
12747 | reg &= ~CTRL_EXT_FORCE_SMBUS; |
12748 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
12749 | } |
12750 | } |
12751 | return true; |
12752 | } |
12753 | |
12754 | static void |
12755 | wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc) |
12756 | { |
12757 | uint32_t reg; |
12758 | int i; |
12759 | |
12760 | /* Set PHY Config Counter to 50msec */ |
12761 | reg = CSR_READ(sc, WMREG_FEXTNVM3); |
12762 | reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK; |
12763 | reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS; |
12764 | CSR_WRITE(sc, WMREG_FEXTNVM3, reg); |
12765 | |
12766 | /* Toggle LANPHYPC */ |
12767 | reg = CSR_READ(sc, WMREG_CTRL); |
12768 | reg |= CTRL_LANPHYPC_OVERRIDE; |
12769 | reg &= ~CTRL_LANPHYPC_VALUE; |
12770 | CSR_WRITE(sc, WMREG_CTRL, reg); |
12771 | CSR_WRITE_FLUSH(sc); |
12772 | delay(1000); |
12773 | reg &= ~CTRL_LANPHYPC_OVERRIDE; |
12774 | CSR_WRITE(sc, WMREG_CTRL, reg); |
12775 | CSR_WRITE_FLUSH(sc); |
12776 | |
12777 | if (sc->sc_type < WM_T_PCH_LPT) |
12778 | delay(50 * 1000); |
12779 | else { |
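/*
 * Poll CTRL_EXT_LPCD every 5 ms, up to 20 times (about 100 ms), then
 * wait another 30 ms to settle.
 */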
12780 | i = 20; |
12781 | |
12782 | do { |
12783 | delay(5 * 1000); |
12784 | } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0) |
12785 | && i--); |
12786 | |
12787 | delay(30 * 1000); |
12788 | } |
12789 | } |
12790 | |
12791 | static int |
12792 | wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link) |
12793 | { |
12794 | uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ) |
12795 | | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND; |
12796 | uint32_t rxa; |
12797 | uint16_t scale = 0, lat_enc = 0; |
12798 | int64_t lat_ns, value; |
12799 | |
DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
device_xname(sc->sc_dev), __func__));
12802 | |
12803 | if (link) { |
12804 | pcireg_t preg; |
12805 | uint16_t max_snoop, max_nosnoop, max_ltr_enc; |
12806 | |
12807 | rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK; |
12808 | |
12809 | /* |
12810 | * Determine the maximum latency tolerated by the device. |
12811 | * |
12812 | * Per the PCIe spec, the tolerated latencies are encoded as |
12813 | * a 3-bit encoded scale (only 0-5 are valid) multiplied by |
12814 | * a 10-bit value (0-1023) to provide a range from 1 ns to |
12815 | * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, |
12816 | * 1=2^5ns, 2=2^10ns,...5=2^25ns. |
12817 | */ |
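/*
 * Worked example for the encoding loop below, assuming LTRV_VALUE is
 * the 10-bit value mask (1023): for lat_ns = 100000, the value
 * exceeds 1023, so scale becomes 1 and value = howmany(100000, 32) =
 * 3125; that is still over 1023, so scale becomes 2 and value =
 * howmany(3125, 32) = 98.  The encoded latency is then
 * 98 * 2^(5*2) ns = 100352 ns, the smallest representable value not
 * below lat_ns.
 *
 * lat_ns itself is the Rx buffer allocation (rxa, in KB, less twice
 * the MTU) converted to bits and divided by the link speed in Mb/s;
 * the "* 1000" turns the microseconds that bits/Mbps would yield into
 * nanoseconds.
 */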
12818 | lat_ns = ((int64_t)rxa * 1024 - |
12819 | (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000; |
12820 | if (lat_ns < 0) |
12821 | lat_ns = 0; |
12822 | else { |
12823 | uint32_t status; |
12824 | uint16_t speed; |
12825 | |
12826 | status = CSR_READ(sc, WMREG_STATUS); |
12827 | switch (__SHIFTOUT(status, STATUS_SPEED)) { |
12828 | case STATUS_SPEED_10: |
12829 | speed = 10; |
12830 | break; |
12831 | case STATUS_SPEED_100: |
12832 | speed = 100; |
12833 | break; |
12834 | case STATUS_SPEED_1000: |
12835 | speed = 1000; |
12836 | break; |
12837 | default: |
printf("%s: Unknown speed (status = %08x)\n",
device_xname(sc->sc_dev), status);
12840 | return -1; |
12841 | } |
12842 | lat_ns /= speed; |
12843 | } |
12844 | value = lat_ns; |
12845 | |
12846 | while (value > LTRV_VALUE) { |
scale++;
12848 | value = howmany(value, __BIT(5)); |
12849 | } |
12850 | if (scale > LTRV_SCALE_MAX) { |
printf("%s: Invalid LTR latency scale %d\n",
device_xname(sc->sc_dev), scale);
12853 | return -1; |
12854 | } |
12855 | lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value); |
12856 | |
12857 | preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
12858 | WM_PCI_LTR_CAP_LPT); |
12859 | max_snoop = preg & 0xffff; |
12860 | max_nosnoop = preg >> 16; |
12861 | |
12862 | max_ltr_enc = MAX(max_snoop, max_nosnoop); |
12863 | |
12864 | if (lat_enc > max_ltr_enc) { |
12865 | lat_enc = max_ltr_enc; |
12866 | } |
12867 | } |
12868 | /* Snoop and No-Snoop latencies the same */ |
12869 | reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP); |
12870 | CSR_WRITE(sc, WMREG_LTRV, reg); |
12871 | |
12872 | return 0; |
12873 | } |
12874 | |
12875 | /* |
12876 | * I210 Errata 25 and I211 Errata 10 |
12877 | * Slow System Clock. |
12878 | */ |
12879 | static void |
12880 | wm_pll_workaround_i210(struct wm_softc *sc) |
12881 | { |
12882 | uint32_t mdicnfg, wuc; |
12883 | uint32_t reg; |
12884 | pcireg_t pcireg; |
12885 | uint32_t pmreg; |
12886 | uint16_t nvmword, tmp_nvmword; |
12887 | int phyval; |
12888 | bool wa_done = false; |
12889 | int i; |
12890 | |
12891 | /* Save WUC and MDICNFG registers */ |
12892 | wuc = CSR_READ(sc, WMREG_WUC); |
12893 | mdicnfg = CSR_READ(sc, WMREG_MDICNFG); |
12894 | |
12895 | reg = mdicnfg & ~MDICNFG_DEST; |
12896 | CSR_WRITE(sc, WMREG_MDICNFG, reg); |
12897 | |
12898 | if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) |
12899 | nvmword = INVM_DEFAULT_AL; |
12900 | tmp_nvmword = nvmword | INVM_PLL_WO_VAL; |
12901 | |
12902 | /* Get Power Management cap offset */ |
12903 | if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, |
12904 | &pmreg, NULL) == 0) |
12905 | return; |
12906 | for (i = 0; i < WM_MAX_PLL_TRIES; i++) { |
12907 | phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1, |
12908 | GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG); |
12909 | |
12910 | if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) { |
12911 | break; /* OK */ |
12912 | } |
12913 | |
12914 | wa_done = true; |
12915 | /* Directly reset the internal PHY */ |
12916 | reg = CSR_READ(sc, WMREG_CTRL); |
12917 | CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET); |
12918 | |
12919 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
12920 | reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE; |
12921 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
12922 | |
12923 | CSR_WRITE(sc, WMREG_WUC, 0); |
12924 | reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16); |
12925 | CSR_WRITE(sc, WMREG_EEARBC_I210, reg); |
12926 | |
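/*
 * Bounce the device through D3hot and back to D0; the power-state
 * transition appears to be what lets the PLL relock using the
 * temporary autoload value programmed above.
 */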
12927 | pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
12928 | pmreg + PCI_PMCSR); |
12929 | pcireg |= PCI_PMCSR_STATE_D3; |
12930 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, |
12931 | pmreg + PCI_PMCSR, pcireg); |
12932 | delay(1000); |
12933 | pcireg &= ~PCI_PMCSR_STATE_D3; |
12934 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, |
12935 | pmreg + PCI_PMCSR, pcireg); |
12936 | |
12937 | reg = (INVM_AUTOLOAD << 4) | (nvmword << 16); |
12938 | CSR_WRITE(sc, WMREG_EEARBC_I210, reg); |
12939 | |
12940 | /* Restore WUC register */ |
12941 | CSR_WRITE(sc, WMREG_WUC, wuc); |
12942 | } |
12943 | |
12944 | /* Restore MDICNFG setting */ |
12945 | CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg); |
12946 | if (wa_done) |
aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12948 | } |
12949 | |