1 | /****************************************************************************** |
2 | |
3 | Copyright (c) 2001-2013, Intel Corporation |
4 | All rights reserved. |
5 | |
6 | Redistribution and use in source and binary forms, with or without |
7 | modification, are permitted provided that the following conditions are met: |
8 | |
9 | 1. Redistributions of source code must retain the above copyright notice, |
10 | this list of conditions and the following disclaimer. |
11 | |
12 | 2. Redistributions in binary form must reproduce the above copyright |
13 | notice, this list of conditions and the following disclaimer in the |
14 | documentation and/or other materials provided with the distribution. |
15 | |
16 | 3. Neither the name of the Intel Corporation nor the names of its |
17 | contributors may be used to endorse or promote products derived from |
18 | this software without specific prior written permission. |
19 | |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
23 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
24 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | POSSIBILITY OF SUCH DAMAGE. |
31 | |
32 | ******************************************************************************/ |
33 | /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 251964 2013-06-18 21:28:19Z jfv $*/ |
34 | /*$NetBSD: ixgbe_82598.c,v 1.5 2015/08/05 04:08:44 msaitoh Exp $*/ |
35 | |
36 | #include "ixgbe_type.h" |
37 | #include "ixgbe_82598.h" |
38 | #include "ixgbe_api.h" |
39 | #include "ixgbe_common.h" |
40 | #include "ixgbe_phy.h" |
41 | |
42 | static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, |
43 | ixgbe_link_speed *speed, |
44 | bool *autoneg); |
45 | static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); |
46 | static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, |
47 | bool autoneg_wait_to_complete); |
48 | static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, |
49 | ixgbe_link_speed *speed, bool *link_up, |
50 | bool link_up_wait_to_complete); |
51 | static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, |
52 | ixgbe_link_speed speed, |
53 | bool autoneg_wait_to_complete); |
54 | static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, |
55 | ixgbe_link_speed speed, |
56 | bool autoneg_wait_to_complete); |
57 | static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); |
58 | static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); |
59 | static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); |
60 | static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, |
61 | u32 headroom, int strategy); |
62 | static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, |
63 | u8 *sff8472_data); |
64 | /** |
65 | * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout |
66 | * @hw: pointer to the HW structure |
67 | * |
 * The defaults for 82598 should be in the range of 50us to 50ms;
 * however, the hardware default for these parts is 500us to 1ms, which is
 * less than the 10ms recommended by the PCIe spec. To address this we need to
71 | * increase the value to either 10ms to 250ms for capability version 1 config, |
72 | * or 16ms to 55ms for version 2. |
73 | **/ |
74 | void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) |
75 | { |
76 | u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); |
77 | u16 pcie_devctl2; |
78 | |
79 | /* only take action if timeout value is defaulted to 0 */ |
80 | if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) |
81 | goto out; |
82 | |
83 | /* |
	 * if the capabilities version is type 1 we can write the
85 | * timeout of 10ms to 250ms through the GCR register |
86 | */ |
87 | if (!(gcr & IXGBE_GCR_CAP_VER2)) { |
88 | gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; |
89 | goto out; |
90 | } |
91 | |
92 | /* |
93 | * for version 2 capabilities we need to write the config space |
94 | * directly in order to set the completion timeout value for |
95 | * 16ms to 55ms |
96 | */ |
97 | pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); |
98 | pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; |
99 | IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); |
100 | out: |
101 | /* disable completion timeout resend */ |
102 | gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; |
103 | IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); |
104 | } |
105 | |
106 | /** |
107 | * ixgbe_init_ops_82598 - Inits func ptrs and MAC type |
108 | * @hw: pointer to hardware structure |
109 | * |
110 | * Initialize the function pointers and assign the MAC type for 82598. |
111 | * Does not touch the hardware. |
112 | **/ |
113 | s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) |
114 | { |
115 | struct ixgbe_mac_info *mac = &hw->mac; |
116 | struct ixgbe_phy_info *phy = &hw->phy; |
117 | s32 ret_val; |
118 | |
	DEBUGFUNC("ixgbe_init_ops_82598");
120 | |
121 | ret_val = ixgbe_init_phy_ops_generic(hw); |
122 | ret_val = ixgbe_init_ops_generic(hw); |
123 | |
124 | /* PHY */ |
125 | phy->ops.init = &ixgbe_init_phy_ops_82598; |
126 | |
127 | /* MAC */ |
128 | mac->ops.start_hw = &ixgbe_start_hw_82598; |
129 | mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598; |
130 | mac->ops.reset_hw = &ixgbe_reset_hw_82598; |
131 | mac->ops.get_media_type = &ixgbe_get_media_type_82598; |
132 | mac->ops.get_supported_physical_layer = |
133 | &ixgbe_get_supported_physical_layer_82598; |
134 | mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598; |
135 | mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598; |
136 | mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598; |
137 | |
138 | /* RAR, Multicast, VLAN */ |
139 | mac->ops.set_vmdq = &ixgbe_set_vmdq_82598; |
140 | mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598; |
141 | mac->ops.set_vfta = &ixgbe_set_vfta_82598; |
142 | mac->ops.set_vlvf = NULL; |
143 | mac->ops.clear_vfta = &ixgbe_clear_vfta_82598; |
144 | |
145 | /* Flow Control */ |
146 | mac->ops.fc_enable = &ixgbe_fc_enable_82598; |
147 | |
148 | mac->mcft_size = 128; |
149 | mac->vft_size = 128; |
150 | mac->num_rar_entries = 16; |
151 | mac->rx_pb_size = 512; |
152 | mac->max_tx_queues = 32; |
153 | mac->max_rx_queues = 64; |
154 | mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); |
155 | |
156 | /* SFP+ Module */ |
157 | phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598; |
158 | phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598; |
159 | |
160 | /* Link */ |
161 | mac->ops.check_link = &ixgbe_check_mac_link_82598; |
162 | mac->ops.setup_link = &ixgbe_setup_mac_link_82598; |
163 | mac->ops.flap_tx_laser = NULL; |
164 | mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598; |
165 | mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598; |
166 | |
167 | /* Manageability interface */ |
168 | mac->ops.set_fw_drv_ver = NULL; |
169 | |
170 | mac->ops.get_rtrup2tc = NULL; |
171 | |
172 | return ret_val; |
173 | } |
174 | |
175 | /** |
176 | * ixgbe_init_phy_ops_82598 - PHY/SFP specific init |
177 | * @hw: pointer to hardware structure |
178 | * |
179 | * Initialize any function pointers that were not able to be |
180 | * set during init_shared_code because the PHY/SFP type was |
181 | * not known. Perform the SFP init if necessary. |
182 | * |
183 | **/ |
184 | s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) |
185 | { |
186 | struct ixgbe_mac_info *mac = &hw->mac; |
187 | struct ixgbe_phy_info *phy = &hw->phy; |
188 | s32 ret_val = IXGBE_SUCCESS; |
189 | u16 list_offset, data_offset; |
190 | |
	DEBUGFUNC("ixgbe_init_phy_ops_82598");
192 | |
193 | /* Identify the PHY */ |
194 | phy->ops.identify(hw); |
195 | |
196 | /* Overwrite the link function pointers if copper PHY */ |
197 | if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { |
198 | mac->ops.setup_link = &ixgbe_setup_copper_link_82598; |
199 | mac->ops.get_link_capabilities = |
200 | &ixgbe_get_copper_link_capabilities_generic; |
201 | } |
202 | |
203 | switch (hw->phy.type) { |
204 | case ixgbe_phy_tn: |
205 | phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; |
206 | phy->ops.check_link = &ixgbe_check_phy_link_tnx; |
207 | phy->ops.get_firmware_version = |
208 | &ixgbe_get_phy_firmware_version_tnx; |
209 | break; |
210 | case ixgbe_phy_nl: |
211 | phy->ops.reset = &ixgbe_reset_phy_nl; |
212 | |
213 | /* Call SFP+ identify routine to get the SFP+ module type */ |
214 | ret_val = phy->ops.identify_sfp(hw); |
215 | if (ret_val != IXGBE_SUCCESS) |
216 | goto out; |
217 | else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { |
218 | ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; |
219 | goto out; |
220 | } |
221 | |
222 | /* Check to see if SFP+ module is supported */ |
223 | ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, |
224 | &list_offset, |
225 | &data_offset); |
226 | if (ret_val != IXGBE_SUCCESS) { |
227 | ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; |
228 | goto out; |
229 | } |
230 | break; |
231 | default: |
232 | break; |
233 | } |
234 | |
235 | out: |
236 | return ret_val; |
237 | } |
238 | |
239 | /** |
240 | * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx |
241 | * @hw: pointer to hardware structure |
242 | * |
243 | * Starts the hardware using the generic start_hw function. |
 * Disables relaxed ordering, then sets the PCIe completion timeout.
245 | * |
246 | **/ |
247 | s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) |
248 | { |
249 | u32 regval; |
250 | u32 i; |
251 | s32 ret_val = IXGBE_SUCCESS; |
252 | |
	DEBUGFUNC("ixgbe_start_hw_82598");
254 | |
255 | ret_val = ixgbe_start_hw_generic(hw); |
256 | |
257 | /* Disable relaxed ordering */ |
258 | for (i = 0; ((i < hw->mac.max_tx_queues) && |
259 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { |
260 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); |
261 | regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; |
262 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); |
263 | } |
264 | |
265 | for (i = 0; ((i < hw->mac.max_rx_queues) && |
266 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { |
267 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); |
268 | regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | |
269 | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); |
270 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); |
271 | } |
272 | |
273 | /* set the completion timeout for interface */ |
274 | if (ret_val == IXGBE_SUCCESS) |
275 | ixgbe_set_pcie_completion_timeout(hw); |
276 | |
277 | return ret_val; |
278 | } |
279 | |
280 | /** |
281 | * ixgbe_get_link_capabilities_82598 - Determines link capabilities |
282 | * @hw: pointer to hardware structure |
283 | * @speed: pointer to link speed |
 * @autoneg: pointer to boolean; set TRUE when autonegotiation is used
285 | * |
286 | * Determines the link capabilities by reading the AUTOC register. |
287 | **/ |
288 | static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, |
289 | ixgbe_link_speed *speed, |
290 | bool *autoneg) |
291 | { |
292 | s32 status = IXGBE_SUCCESS; |
293 | u32 autoc = 0; |
294 | |
	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
296 | |
297 | /* |
298 | * Determine link capabilities based on the stored value of AUTOC, |
299 | * which represents EEPROM defaults. If AUTOC value has not been |
300 | * stored, use the current register value. |
301 | */ |
302 | if (hw->mac.orig_link_settings_stored) |
303 | autoc = hw->mac.orig_autoc; |
304 | else |
305 | autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
306 | |
307 | switch (autoc & IXGBE_AUTOC_LMS_MASK) { |
308 | case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: |
309 | *speed = IXGBE_LINK_SPEED_1GB_FULL; |
310 | *autoneg = FALSE; |
311 | break; |
312 | |
313 | case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: |
314 | *speed = IXGBE_LINK_SPEED_10GB_FULL; |
315 | *autoneg = FALSE; |
316 | break; |
317 | |
318 | case IXGBE_AUTOC_LMS_1G_AN: |
319 | *speed = IXGBE_LINK_SPEED_1GB_FULL; |
320 | *autoneg = TRUE; |
321 | break; |
322 | |
323 | case IXGBE_AUTOC_LMS_KX4_AN: |
324 | case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: |
325 | *speed = IXGBE_LINK_SPEED_UNKNOWN; |
326 | if (autoc & IXGBE_AUTOC_KX4_SUPP) |
327 | *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
328 | if (autoc & IXGBE_AUTOC_KX_SUPP) |
329 | *speed |= IXGBE_LINK_SPEED_1GB_FULL; |
330 | *autoneg = TRUE; |
331 | break; |
332 | |
333 | default: |
334 | status = IXGBE_ERR_LINK_SETUP; |
335 | break; |
336 | } |
337 | |
338 | return status; |
339 | } |
340 | |
341 | /** |
342 | * ixgbe_get_media_type_82598 - Determines media type |
343 | * @hw: pointer to hardware structure |
344 | * |
345 | * Returns the media type (fiber, copper, backplane) |
346 | **/ |
347 | static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) |
348 | { |
349 | enum ixgbe_media_type media_type; |
350 | |
	DEBUGFUNC("ixgbe_get_media_type_82598");
352 | |
353 | /* Detect if there is a copper PHY attached. */ |
354 | switch (hw->phy.type) { |
355 | case ixgbe_phy_cu_unknown: |
356 | case ixgbe_phy_tn: |
357 | media_type = ixgbe_media_type_copper; |
358 | goto out; |
359 | default: |
360 | break; |
361 | } |
362 | |
363 | /* Media type for I82598 is based on device ID */ |
364 | switch (hw->device_id) { |
365 | case IXGBE_DEV_ID_82598: |
366 | case IXGBE_DEV_ID_82598_BX: |
367 | /* Default device ID is mezzanine card KX/KX4 */ |
368 | media_type = ixgbe_media_type_backplane; |
369 | break; |
370 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: |
371 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: |
372 | case IXGBE_DEV_ID_82598_DA_DUAL_PORT: |
373 | case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: |
374 | case IXGBE_DEV_ID_82598EB_XF_LR: |
375 | case IXGBE_DEV_ID_82598EB_SFP_LOM: |
376 | media_type = ixgbe_media_type_fiber; |
377 | break; |
378 | case IXGBE_DEV_ID_82598EB_CX4: |
379 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: |
380 | media_type = ixgbe_media_type_cx4; |
381 | break; |
382 | case IXGBE_DEV_ID_82598AT: |
383 | case IXGBE_DEV_ID_82598AT2: |
384 | media_type = ixgbe_media_type_copper; |
385 | break; |
386 | default: |
387 | media_type = ixgbe_media_type_unknown; |
388 | break; |
389 | } |
390 | out: |
391 | return media_type; |
392 | } |
393 | |
394 | /** |
395 | * ixgbe_fc_enable_82598 - Enable flow control |
396 | * @hw: pointer to hardware structure |
397 | * |
398 | * Enable flow control according to the current settings. |
399 | **/ |
400 | s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) |
401 | { |
402 | s32 ret_val = IXGBE_SUCCESS; |
403 | u32 fctrl_reg; |
404 | u32 rmcs_reg; |
405 | u32 reg; |
406 | u32 fcrtl, fcrth; |
407 | u32 link_speed = 0; |
408 | int i; |
409 | bool link_up; |
410 | |
	DEBUGFUNC("ixgbe_fc_enable_82598");
412 | |
413 | /* Validate the water mark configuration */ |
414 | if (!hw->fc.pause_time) { |
415 | ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; |
416 | goto out; |
417 | } |
418 | |
419 | /* Low water mark of zero causes XOFF floods */ |
420 | for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { |
421 | if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && |
422 | hw->fc.high_water[i]) { |
423 | if (!hw->fc.low_water[i] || |
424 | hw->fc.low_water[i] >= hw->fc.high_water[i]) { |
				DEBUGOUT("Invalid water mark configuration\n");
426 | ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; |
427 | goto out; |
428 | } |
429 | } |
430 | } |
431 | |
432 | /* |
	 * On 82598, having Rx flow control enabled causes resets while doing
	 * 1G, so if it is on, turn it off once link_speed is known. For more
	 * details see the 82598 Specification Update.
436 | */ |
437 | hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); |
438 | if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { |
439 | switch (hw->fc.requested_mode) { |
440 | case ixgbe_fc_full: |
441 | hw->fc.requested_mode = ixgbe_fc_tx_pause; |
442 | break; |
443 | case ixgbe_fc_rx_pause: |
444 | hw->fc.requested_mode = ixgbe_fc_none; |
445 | break; |
446 | default: |
447 | /* no change */ |
448 | break; |
449 | } |
450 | } |
451 | |
452 | /* Negotiate the fc mode to use */ |
453 | ixgbe_fc_autoneg(hw); |
454 | |
455 | /* Disable any previous flow control settings */ |
456 | fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
457 | fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); |
458 | |
459 | rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); |
460 | rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); |
461 | |
462 | /* |
463 | * The possible values of fc.current_mode are: |
464 | * 0: Flow control is completely disabled |
465 | * 1: Rx flow control is enabled (we can receive pause frames, |
466 | * but not send pause frames). |
467 | * 2: Tx flow control is enabled (we can send pause frames but |
468 | * we do not support receiving pause frames). |
469 | * 3: Both Rx and Tx flow control (symmetric) are enabled. |
470 | * other: Invalid. |
471 | */ |
472 | switch (hw->fc.current_mode) { |
473 | case ixgbe_fc_none: |
474 | /* |
475 | * Flow control is disabled by software override or autoneg. |
476 | * The code below will actually disable it in the HW. |
477 | */ |
478 | break; |
479 | case ixgbe_fc_rx_pause: |
480 | /* |
481 | * Rx Flow control is enabled and Tx Flow control is |
482 | * disabled by software override. Since there really |
483 | * isn't a way to advertise that we are capable of RX |
484 | * Pause ONLY, we will advertise that we support both |
485 | * symmetric and asymmetric Rx PAUSE. Later, we will |
486 | * disable the adapter's ability to send PAUSE frames. |
487 | */ |
488 | fctrl_reg |= IXGBE_FCTRL_RFCE; |
489 | break; |
490 | case ixgbe_fc_tx_pause: |
491 | /* |
492 | * Tx Flow control is enabled, and Rx Flow control is |
493 | * disabled by software override. |
494 | */ |
495 | rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; |
496 | break; |
497 | case ixgbe_fc_full: |
498 | /* Flow control (both Rx and Tx) is enabled by SW override. */ |
499 | fctrl_reg |= IXGBE_FCTRL_RFCE; |
500 | rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; |
501 | break; |
502 | default: |
		DEBUGOUT("Flow control param set incorrectly\n");
504 | ret_val = IXGBE_ERR_CONFIG; |
505 | goto out; |
506 | break; |
507 | } |
508 | |
509 | /* Set 802.3x based flow control settings. */ |
510 | fctrl_reg |= IXGBE_FCTRL_DPF; |
511 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); |
512 | IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); |
513 | |
514 | /* Set up and enable Rx high/low water mark thresholds, enable XON. */ |
515 | for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { |
516 | if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && |
517 | hw->fc.high_water[i]) { |
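			/*
			 * The shared code keeps the water marks in KB, so the
			 * shift by 10 converts them to the byte units the
			 * FCRTL/FCRTH registers expect (assumption based on
			 * the common flow control code).
			 */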
518 | fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; |
519 | fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; |
520 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); |
521 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); |
522 | } else { |
523 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); |
524 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); |
525 | } |
526 | |
527 | } |
528 | |
529 | /* Configure pause time (2 TCs per register) */ |
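	/*
	 * Each FCTTV register holds the pause time for two traffic classes,
	 * so multiplying by 0x00010001 replicates the 16-bit value into both
	 * halves, e.g. a pause time of 0x0680 becomes 0x06800680.
	 */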
530 | reg = hw->fc.pause_time * 0x00010001; |
531 | for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) |
532 | IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); |
533 | |
534 | /* Configure flow control refresh threshold value */ |
535 | IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); |
536 | |
537 | out: |
538 | return ret_val; |
539 | } |
540 | |
541 | /** |
542 | * ixgbe_start_mac_link_82598 - Configures MAC link settings |
543 | * @hw: pointer to hardware structure |
544 | * |
545 | * Configures link settings based on values in the ixgbe_hw struct. |
546 | * Restarts the link. Performs autonegotiation if needed. |
547 | **/ |
548 | static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, |
549 | bool autoneg_wait_to_complete) |
550 | { |
551 | u32 autoc_reg; |
552 | u32 links_reg; |
553 | u32 i; |
554 | s32 status = IXGBE_SUCCESS; |
555 | |
	DEBUGFUNC("ixgbe_start_mac_link_82598");
557 | |
558 | /* Restart link */ |
559 | autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
560 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; |
561 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); |
562 | |
563 | /* Only poll for autoneg to complete if specified to do so */ |
564 | if (autoneg_wait_to_complete) { |
565 | if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == |
566 | IXGBE_AUTOC_LMS_KX4_AN || |
567 | (autoc_reg & IXGBE_AUTOC_LMS_MASK) == |
568 | IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { |
569 | links_reg = 0; /* Just in case Autoneg time = 0 */ |
570 | for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { |
571 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); |
572 | if (links_reg & IXGBE_LINKS_KX_AN_COMP) |
573 | break; |
574 | msec_delay(100); |
575 | } |
576 | if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { |
577 | status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; |
				DEBUGOUT("Autonegotiation did not complete.\n");
579 | } |
580 | } |
581 | } |
582 | |
	/* Add delay to filter out noise during initial link setup */
584 | msec_delay(50); |
585 | |
586 | return status; |
587 | } |
588 | |
589 | /** |
590 | * ixgbe_validate_link_ready - Function looks for phy link |
591 | * @hw: pointer to hardware structure |
592 | * |
593 | * Function indicates success when phy link is available. If phy is not ready |
 * within 5 seconds of the MAC indicating link, the function returns an error.
595 | **/ |
596 | static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) |
597 | { |
598 | u32 timeout; |
599 | u16 an_reg; |
600 | |
601 | if (hw->device_id != IXGBE_DEV_ID_82598AT2) |
602 | return IXGBE_SUCCESS; |
603 | |
604 | for (timeout = 0; |
605 | timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { |
606 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, |
607 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); |
608 | |
609 | if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && |
610 | (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) |
611 | break; |
612 | |
613 | msec_delay(100); |
614 | } |
615 | |
616 | if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { |
		DEBUGOUT("Link was indicated but link is down\n");
618 | return IXGBE_ERR_LINK_SETUP; |
619 | } |
620 | |
621 | return IXGBE_SUCCESS; |
622 | } |
623 | |
624 | /** |
625 | * ixgbe_check_mac_link_82598 - Get link/speed status |
626 | * @hw: pointer to hardware structure |
627 | * @speed: pointer to link speed |
 * @link_up: TRUE if link is up, FALSE otherwise
629 | * @link_up_wait_to_complete: bool used to wait for link up or not |
630 | * |
631 | * Reads the links register to determine if link is up and the current speed |
632 | **/ |
633 | static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, |
634 | ixgbe_link_speed *speed, bool *link_up, |
635 | bool link_up_wait_to_complete) |
636 | { |
637 | u32 links_reg; |
638 | u32 i; |
639 | u16 link_reg, adapt_comp_reg; |
640 | |
	DEBUGFUNC("ixgbe_check_mac_link_82598");
642 | |
643 | /* |
644 | * SERDES PHY requires us to read link status from undocumented |
645 | * register 0xC79F. Bit 0 set indicates link is up/ready; clear |
	 * indicates link down. 0xC00C is read to check that the XAUI lanes
647 | * are active. Bit 0 clear indicates active; set indicates inactive. |
648 | */ |
649 | if (hw->phy.type == ixgbe_phy_nl) { |
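		/*
		 * Note: 0xC79F is intentionally read twice below; status bits
		 * of this kind typically latch, so the first read is assumed
		 * to flush a stale value.
		 */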
650 | hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); |
651 | hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); |
652 | hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, |
653 | &adapt_comp_reg); |
654 | if (link_up_wait_to_complete) { |
655 | for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { |
656 | if ((link_reg & 1) && |
657 | ((adapt_comp_reg & 1) == 0)) { |
658 | *link_up = TRUE; |
659 | break; |
660 | } else { |
661 | *link_up = FALSE; |
662 | } |
663 | msec_delay(100); |
664 | hw->phy.ops.read_reg(hw, 0xC79F, |
665 | IXGBE_TWINAX_DEV, |
666 | &link_reg); |
667 | hw->phy.ops.read_reg(hw, 0xC00C, |
668 | IXGBE_TWINAX_DEV, |
669 | &adapt_comp_reg); |
670 | } |
671 | } else { |
672 | if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) |
673 | *link_up = TRUE; |
674 | else |
675 | *link_up = FALSE; |
676 | } |
677 | |
678 | if (*link_up == FALSE) |
679 | goto out; |
680 | } |
681 | |
682 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); |
683 | if (link_up_wait_to_complete) { |
684 | for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { |
685 | if (links_reg & IXGBE_LINKS_UP) { |
686 | *link_up = TRUE; |
687 | break; |
688 | } else { |
689 | *link_up = FALSE; |
690 | } |
691 | msec_delay(100); |
692 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); |
693 | } |
694 | } else { |
695 | if (links_reg & IXGBE_LINKS_UP) |
696 | *link_up = TRUE; |
697 | else |
698 | *link_up = FALSE; |
699 | } |
700 | |
701 | if (links_reg & IXGBE_LINKS_SPEED) |
702 | *speed = IXGBE_LINK_SPEED_10GB_FULL; |
703 | else |
704 | *speed = IXGBE_LINK_SPEED_1GB_FULL; |
705 | |
706 | if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) && |
707 | (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) |
708 | *link_up = FALSE; |
709 | |
710 | out: |
711 | return IXGBE_SUCCESS; |
712 | } |
713 | |
714 | /** |
715 | * ixgbe_setup_mac_link_82598 - Set MAC link speed |
716 | * @hw: pointer to hardware structure |
717 | * @speed: new link speed |
718 | * @autoneg_wait_to_complete: TRUE when waiting for completion is needed |
719 | * |
720 | * Set the link speed in the AUTOC register and restarts link. |
721 | **/ |
722 | static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, |
723 | ixgbe_link_speed speed, |
724 | bool autoneg_wait_to_complete) |
725 | { |
726 | bool autoneg = FALSE; |
727 | s32 status = IXGBE_SUCCESS; |
728 | ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; |
729 | u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
730 | u32 autoc = curr_autoc; |
731 | u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; |
732 | |
	DEBUGFUNC("ixgbe_setup_mac_link_82598");
734 | |
735 | /* Check to see if speed passed in is supported. */ |
736 | ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); |
737 | speed &= link_capabilities; |
738 | |
739 | if (speed == IXGBE_LINK_SPEED_UNKNOWN) |
740 | status = IXGBE_ERR_LINK_SETUP; |
741 | |
742 | /* Set KX4/KX support according to speed requested */ |
743 | else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || |
744 | link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { |
745 | autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; |
746 | if (speed & IXGBE_LINK_SPEED_10GB_FULL) |
747 | autoc |= IXGBE_AUTOC_KX4_SUPP; |
748 | if (speed & IXGBE_LINK_SPEED_1GB_FULL) |
749 | autoc |= IXGBE_AUTOC_KX_SUPP; |
750 | if (autoc != curr_autoc) |
751 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); |
752 | } |
753 | |
754 | if (status == IXGBE_SUCCESS) { |
755 | /* |
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw. This will write the AUTOC register based on the
		 * new stored values.
759 | */ |
760 | status = ixgbe_start_mac_link_82598(hw, |
761 | autoneg_wait_to_complete); |
762 | } |
763 | |
764 | return status; |
765 | } |
766 | |
767 | |
768 | /** |
769 | * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field |
770 | * @hw: pointer to hardware structure |
771 | * @speed: new link speed |
772 | * @autoneg_wait_to_complete: TRUE if waiting is needed to complete |
773 | * |
774 | * Sets the link speed in the AUTOC register in the MAC and restarts link. |
775 | **/ |
776 | static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, |
777 | ixgbe_link_speed speed, |
778 | bool autoneg_wait_to_complete) |
779 | { |
780 | s32 status; |
781 | |
	DEBUGFUNC("ixgbe_setup_copper_link_82598");
783 | |
784 | /* Setup the PHY according to input speed */ |
785 | status = hw->phy.ops.setup_link_speed(hw, speed, |
786 | autoneg_wait_to_complete); |
787 | /* Set up MAC */ |
788 | ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); |
789 | |
790 | return status; |
791 | } |
792 | |
793 | /** |
794 | * ixgbe_reset_hw_82598 - Performs hardware reset |
795 | * @hw: pointer to hardware structure |
796 | * |
797 | * Resets the hardware by resetting the transmit and receive units, masks and |
798 | * clears all interrupts, performing a PHY reset, and performing a link (MAC) |
799 | * reset. |
800 | **/ |
801 | static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) |
802 | { |
803 | s32 status = IXGBE_SUCCESS; |
804 | s32 phy_status = IXGBE_SUCCESS; |
805 | u32 ctrl; |
806 | u32 gheccr; |
807 | u32 i; |
808 | u32 autoc; |
809 | u8 analog_val; |
810 | |
	DEBUGFUNC("ixgbe_reset_hw_82598");
812 | |
813 | /* Call adapter stop to disable tx/rx and clear interrupts */ |
814 | status = hw->mac.ops.stop_adapter(hw); |
815 | if (status != IXGBE_SUCCESS) |
816 | goto reset_hw_out; |
817 | |
818 | /* |
819 | * Power up the Atlas Tx lanes if they are currently powered down. |
820 | * Atlas Tx lanes are powered down for MAC loopback tests, but |
821 | * they are not automatically restored on reset. |
822 | */ |
823 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); |
824 | if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { |
825 | /* Enable Tx Atlas so packets can be transmitted again */ |
826 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, |
827 | &analog_val); |
828 | analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; |
829 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, |
830 | analog_val); |
831 | |
832 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, |
833 | &analog_val); |
834 | analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; |
835 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, |
836 | analog_val); |
837 | |
838 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, |
839 | &analog_val); |
840 | analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; |
841 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, |
842 | analog_val); |
843 | |
844 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, |
845 | &analog_val); |
846 | analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; |
847 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, |
848 | analog_val); |
849 | } |
850 | |
851 | /* Reset PHY */ |
852 | if (hw->phy.reset_disable == FALSE) { |
853 | /* PHY ops must be identified and initialized prior to reset */ |
854 | |
855 | /* Init PHY and function pointers, perform SFP setup */ |
856 | phy_status = hw->phy.ops.init(hw); |
857 | if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) |
858 | goto reset_hw_out; |
859 | if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) |
860 | goto mac_reset_top; |
861 | |
862 | hw->phy.ops.reset(hw); |
863 | } |
864 | |
865 | mac_reset_top: |
866 | /* |
867 | * Issue global reset to the MAC. This needs to be a SW reset. |
868 | * If link reset is used, it might reset the MAC when mng is using it |
869 | */ |
870 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; |
871 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); |
872 | IXGBE_WRITE_FLUSH(hw); |
873 | |
874 | /* Poll for reset bit to self-clear indicating reset is complete */ |
875 | for (i = 0; i < 10; i++) { |
876 | usec_delay(1); |
877 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
878 | if (!(ctrl & IXGBE_CTRL_RST)) |
879 | break; |
880 | } |
881 | if (ctrl & IXGBE_CTRL_RST) { |
882 | status = IXGBE_ERR_RESET_FAILED; |
		DEBUGOUT("Reset polling failed to complete.\n");
884 | } |
885 | |
886 | msec_delay(50); |
887 | |
888 | /* |
889 | * Double resets are required for recovery from certain error |
890 | * conditions. Between resets, it is necessary to stall to allow time |
891 | * for any pending HW events to complete. |
892 | */ |
893 | if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { |
894 | hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; |
895 | goto mac_reset_top; |
896 | } |
897 | |
898 | gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); |
899 | gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); |
900 | IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); |
901 | |
902 | /* |
903 | * Store the original AUTOC value if it has not been |
904 | * stored off yet. Otherwise restore the stored original |
	 * AUTOC value, since the reset operation sets it back to defaults.
906 | */ |
907 | autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
908 | if (hw->mac.orig_link_settings_stored == FALSE) { |
909 | hw->mac.orig_autoc = autoc; |
910 | hw->mac.orig_link_settings_stored = TRUE; |
911 | } else if (autoc != hw->mac.orig_autoc) { |
912 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); |
913 | } |
914 | |
915 | /* Store the permanent mac address */ |
916 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); |
917 | |
918 | /* |
919 | * Store MAC address from RAR0, clear receive address registers, and |
920 | * clear the multicast table |
921 | */ |
922 | hw->mac.ops.init_rx_addrs(hw); |
923 | |
924 | reset_hw_out: |
925 | if (phy_status != IXGBE_SUCCESS) |
926 | status = phy_status; |
927 | |
928 | return status; |
929 | } |
930 | |
931 | /** |
932 | * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address |
933 | * @hw: pointer to hardware struct |
934 | * @rar: receive address register index to associate with a VMDq index |
935 | * @vmdq: VMDq set index |
936 | **/ |
937 | s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) |
938 | { |
939 | u32 rar_high; |
940 | u32 rar_entries = hw->mac.num_rar_entries; |
941 | |
	DEBUGFUNC("ixgbe_set_vmdq_82598");
943 | |
944 | /* Make sure we are using a valid rar index range */ |
945 | if (rar >= rar_entries) { |
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
947 | return IXGBE_ERR_INVALID_ARGUMENT; |
948 | } |
949 | |
950 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); |
951 | rar_high &= ~IXGBE_RAH_VIND_MASK; |
952 | rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); |
953 | IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); |
954 | return IXGBE_SUCCESS; |
955 | } |
956 | |
957 | /** |
958 | * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address |
959 | * @hw: pointer to hardware struct |
960 | * @rar: receive address register index to associate with a VMDq index |
961 | * @vmdq: VMDq clear index (not used in 82598, but elsewhere) |
962 | **/ |
963 | static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) |
964 | { |
965 | u32 rar_high; |
966 | u32 rar_entries = hw->mac.num_rar_entries; |
967 | |
968 | UNREFERENCED_1PARAMETER(vmdq); |
969 | |
970 | /* Make sure we are using a valid rar index range */ |
971 | if (rar >= rar_entries) { |
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
973 | return IXGBE_ERR_INVALID_ARGUMENT; |
974 | } |
975 | |
976 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); |
977 | if (rar_high & IXGBE_RAH_VIND_MASK) { |
978 | rar_high &= ~IXGBE_RAH_VIND_MASK; |
979 | IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); |
980 | } |
981 | |
982 | return IXGBE_SUCCESS; |
983 | } |
984 | |
985 | /** |
986 | * ixgbe_set_vfta_82598 - Set VLAN filter table |
987 | * @hw: pointer to hardware structure |
988 | * @vlan: VLAN id to write to VLAN filter |
989 | * @vind: VMDq output index that maps queue to VLAN id in VFTA |
990 | * @vlan_on: boolean flag to turn on/off VLAN in VFTA |
991 | * |
992 | * Turn on/off specified VLAN in the VLAN filter table. |
993 | **/ |
994 | s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, |
995 | bool vlan_on) |
996 | { |
997 | u32 regindex; |
998 | u32 bitindex; |
999 | u32 bits; |
1000 | u32 vftabyte; |
1001 | |
	DEBUGFUNC("ixgbe_set_vfta_82598");
1003 | |
1004 | if (vlan > 4095) |
1005 | return IXGBE_ERR_PARAM; |
1006 | |
1007 | /* Determine 32-bit word position in array */ |
1008 | regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ |
1009 | |
1010 | /* Determine the location of the (VMD) queue index */ |
1011 | vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ |
1012 | bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ |
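	/*
	 * Example: for VLAN 100, regindex = 3, vftabyte = 0 and the nibble
	 * starts at bit 16, so its VMDq index lives in bits 19:16 of
	 * VFTAVIND(0, 3) and its filter bit is bit 4 of VFTA(3).
	 */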
1013 | |
1014 | /* Set the nibble for VMD queue index */ |
1015 | bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); |
1016 | bits &= (~(0x0F << bitindex)); |
1017 | bits |= (vind << bitindex); |
1018 | IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); |
1019 | |
1020 | /* Determine the location of the bit for this VLAN id */ |
1021 | bitindex = vlan & 0x1F; /* lower five bits */ |
1022 | |
1023 | bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); |
1024 | if (vlan_on) |
1025 | /* Turn on this VLAN id */ |
1026 | bits |= (1 << bitindex); |
1027 | else |
1028 | /* Turn off this VLAN id */ |
1029 | bits &= ~(1 << bitindex); |
1030 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); |
1031 | |
1032 | return IXGBE_SUCCESS; |
1033 | } |
1034 | |
1035 | /** |
1036 | * ixgbe_clear_vfta_82598 - Clear VLAN filter table |
1037 | * @hw: pointer to hardware structure |
1038 | * |
 * Clears the VLAN filter table, and the VMDq index associated with the filter
1040 | **/ |
1041 | static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) |
1042 | { |
1043 | u32 offset; |
1044 | u32 vlanbyte; |
1045 | |
	DEBUGFUNC("ixgbe_clear_vfta_82598");
1047 | |
1048 | for (offset = 0; offset < hw->mac.vft_size; offset++) |
1049 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); |
1050 | |
1051 | for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) |
1052 | for (offset = 0; offset < hw->mac.vft_size; offset++) |
1053 | IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), |
1054 | 0); |
1055 | |
1056 | return IXGBE_SUCCESS; |
1057 | } |
1058 | |
1059 | /** |
1060 | * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register |
1061 | * @hw: pointer to hardware structure |
1062 | * @reg: analog register to read |
1063 | * @val: read value |
1064 | * |
1065 | * Performs read operation to Atlas analog register specified. |
1066 | **/ |
1067 | s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) |
1068 | { |
1069 | u32 atlas_ctl; |
1070 | |
	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
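	/*
	 * Analog register access: the register address is placed in bits 15:8
	 * of ATLASCTL along with the command bit; after a short delay the
	 * requested value is available in the low byte of ATLASCTL.
	 */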
1072 | |
1073 | IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, |
1074 | IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); |
1075 | IXGBE_WRITE_FLUSH(hw); |
1076 | usec_delay(10); |
1077 | atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); |
1078 | *val = (u8)atlas_ctl; |
1079 | |
1080 | return IXGBE_SUCCESS; |
1081 | } |
1082 | |
1083 | /** |
1084 | * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register |
1085 | * @hw: pointer to hardware structure |
1086 | * @reg: atlas register to write |
1087 | * @val: value to write |
1088 | * |
1089 | * Performs write operation to Atlas analog register specified. |
1090 | **/ |
1091 | s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) |
1092 | { |
1093 | u32 atlas_ctl; |
1094 | |
	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1096 | |
1097 | atlas_ctl = (reg << 8) | val; |
1098 | IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); |
1099 | IXGBE_WRITE_FLUSH(hw); |
1100 | usec_delay(10); |
1101 | |
1102 | return IXGBE_SUCCESS; |
1103 | } |
1104 | |
1105 | /** |
1106 | * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. |
1107 | * @hw: pointer to hardware structure |
1108 | * @dev_addr: address to read from |
1109 | * @byte_offset: byte offset to read from dev_addr |
1110 | * @eeprom_data: value read |
1111 | * |
 * Performs an 8-bit read from the SFP module's EEPROM over the I2C interface.
1113 | **/ |
1114 | static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, |
1115 | u8 byte_offset, u8 *eeprom_data) |
1116 | { |
1117 | s32 status = IXGBE_SUCCESS; |
1118 | u16 sfp_addr = 0; |
1119 | u16 sfp_data = 0; |
1120 | u16 sfp_stat = 0; |
1121 | u16 gssr; |
1122 | u32 i; |
1123 | |
	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1125 | |
1126 | if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) |
1127 | gssr = IXGBE_GSSR_PHY1_SM; |
1128 | else |
1129 | gssr = IXGBE_GSSR_PHY0_SM; |
1130 | |
1131 | if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) |
1132 | return IXGBE_ERR_SWFW_SYNC; |
1133 | |
1134 | if (hw->phy.type == ixgbe_phy_nl) { |
1135 | /* |
1136 | * NetLogic phy SDA/SCL registers are at addresses 0xC30A to |
1137 | * 0xC30D. These registers are used to talk to the SFP+ |
1138 | * module's EEPROM through the SDA/SCL (I2C) interface. |
1139 | */ |
1140 | sfp_addr = (dev_addr << 8) + byte_offset; |
1141 | sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); |
1142 | hw->phy.ops.write_reg_mdi(hw, |
1143 | IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, |
1144 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, |
1145 | sfp_addr); |
1146 | |
1147 | /* Poll status */ |
1148 | for (i = 0; i < 100; i++) { |
1149 | hw->phy.ops.read_reg_mdi(hw, |
1150 | IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, |
1151 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, |
1152 | &sfp_stat); |
1153 | sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; |
1154 | if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) |
1155 | break; |
1156 | msec_delay(10); |
1157 | } |
1158 | |
1159 | if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { |
			DEBUGOUT("EEPROM read did not pass.\n");
1161 | status = IXGBE_ERR_SFP_NOT_PRESENT; |
1162 | goto out; |
1163 | } |
1164 | |
1165 | /* Read data */ |
1166 | hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, |
1167 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); |
1168 | |
1169 | *eeprom_data = (u8)(sfp_data >> 8); |
1170 | } else { |
1171 | status = IXGBE_ERR_PHY; |
1172 | } |
1173 | |
1174 | out: |
1175 | hw->mac.ops.release_swfw_sync(hw, gssr); |
1176 | return status; |
1177 | } |
1178 | |
1179 | /** |
1180 | * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. |
1181 | * @hw: pointer to hardware structure |
1182 | * @byte_offset: EEPROM byte offset to read |
1183 | * @eeprom_data: value read |
1184 | * |
 * Performs an 8-bit read from the SFP module's EEPROM over the I2C interface.
1186 | **/ |
1187 | s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, |
1188 | u8 *eeprom_data) |
1189 | { |
1190 | return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, |
1191 | byte_offset, eeprom_data); |
1192 | } |
1193 | |
1194 | /** |
1195 | * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. |
1196 | * @hw: pointer to hardware structure |
1197 | * @byte_offset: byte offset at address 0xA2 |
 * @sff8472_data: value read
1199 | * |
 * Performs an 8-bit read of the SFP module's SFF-8472 data over I2C
1201 | **/ |
1202 | static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, |
1203 | u8 *sff8472_data) |
1204 | { |
1205 | return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, |
1206 | byte_offset, sff8472_data); |
1207 | } |
1208 | |
1209 | /** |
1210 | * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type |
1211 | * @hw: pointer to hardware structure |
1212 | * |
1213 | * Determines physical layer capabilities of the current configuration. |
1214 | **/ |
1215 | u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) |
1216 | { |
1217 | u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
1218 | u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
1219 | u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; |
1220 | u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; |
1221 | u16 ext_ability = 0; |
1222 | |
	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1224 | |
1225 | hw->phy.ops.identify(hw); |
1226 | |
1227 | /* Copper PHY must be checked before AUTOC LMS to determine correct |
1228 | * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ |
1229 | switch (hw->phy.type) { |
1230 | case ixgbe_phy_tn: |
1231 | case ixgbe_phy_cu_unknown: |
1232 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, |
1233 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); |
1234 | if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) |
1235 | physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; |
1236 | if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) |
1237 | physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; |
1238 | if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) |
1239 | physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; |
1240 | goto out; |
1241 | default: |
1242 | break; |
1243 | } |
1244 | |
1245 | switch (autoc & IXGBE_AUTOC_LMS_MASK) { |
1246 | case IXGBE_AUTOC_LMS_1G_AN: |
1247 | case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: |
1248 | if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) |
1249 | physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; |
1250 | else |
1251 | physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; |
1252 | break; |
1253 | case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: |
1254 | if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) |
1255 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; |
1256 | else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) |
1257 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; |
1258 | else /* XAUI */ |
1259 | physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
1260 | break; |
1261 | case IXGBE_AUTOC_LMS_KX4_AN: |
1262 | case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: |
1263 | if (autoc & IXGBE_AUTOC_KX_SUPP) |
1264 | physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; |
1265 | if (autoc & IXGBE_AUTOC_KX4_SUPP) |
1266 | physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; |
1267 | break; |
1268 | default: |
1269 | break; |
1270 | } |
1271 | |
1272 | if (hw->phy.type == ixgbe_phy_nl) { |
1273 | hw->phy.ops.identify_sfp(hw); |
1274 | |
1275 | switch (hw->phy.sfp_type) { |
1276 | case ixgbe_sfp_type_da_cu: |
1277 | physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; |
1278 | break; |
1279 | case ixgbe_sfp_type_sr: |
1280 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; |
1281 | break; |
1282 | case ixgbe_sfp_type_lr: |
1283 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; |
1284 | break; |
1285 | default: |
1286 | physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
1287 | break; |
1288 | } |
1289 | } |
1290 | |
1291 | switch (hw->device_id) { |
1292 | case IXGBE_DEV_ID_82598_DA_DUAL_PORT: |
1293 | physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; |
1294 | break; |
1295 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: |
1296 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: |
1297 | case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: |
1298 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; |
1299 | break; |
1300 | case IXGBE_DEV_ID_82598EB_XF_LR: |
1301 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; |
1302 | break; |
1303 | default: |
1304 | break; |
1305 | } |
1306 | |
1307 | out: |
1308 | return physical_layer; |
1309 | } |
1310 | |
1311 | /** |
1312 | * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple |
1313 | * port devices. |
1314 | * @hw: pointer to the HW structure |
1315 | * |
 * Calls the common function and corrects an issue with some single port
 * devices that enable LAN1 but not LAN0.
1318 | **/ |
1319 | void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) |
1320 | { |
1321 | struct ixgbe_bus_info *bus = &hw->bus; |
1322 | u16 pci_gen = 0; |
1323 | u16 pci_ctrl2 = 0; |
1324 | |
	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1326 | |
1327 | ixgbe_set_lan_id_multi_port_pcie(hw); |
1328 | |
1329 | /* check if LAN0 is disabled */ |
1330 | hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); |
1331 | if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { |
1332 | |
1333 | hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); |
1334 | |
1335 | /* if LAN0 is completely disabled force function to 0 */ |
1336 | if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && |
1337 | !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && |
1338 | !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { |
1339 | |
1340 | bus->func = 0; |
1341 | } |
1342 | } |
1343 | } |
1344 | |
1345 | /** |
1346 | * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering |
1347 | * @hw: pointer to hardware structure |
1348 | * |
1349 | **/ |
1350 | void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw) |
1351 | { |
1352 | u32 regval; |
1353 | u32 i; |
1354 | |
	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1356 | |
1357 | /* Enable relaxed ordering */ |
1358 | for (i = 0; ((i < hw->mac.max_tx_queues) && |
1359 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { |
1360 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); |
1361 | regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; |
1362 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); |
1363 | } |
1364 | |
1365 | for (i = 0; ((i < hw->mac.max_rx_queues) && |
1366 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { |
1367 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); |
1368 | regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | |
1369 | IXGBE_DCA_RXCTRL_HEAD_WRO_EN; |
1370 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); |
1371 | } |
1372 | |
1373 | } |
1374 | |
1375 | /** |
1376 | * ixgbe_set_rxpba_82598 - Initialize RX packet buffer |
1377 | * @hw: pointer to hardware structure |
1378 | * @num_pb: number of packet buffers to allocate |
1379 | * @headroom: reserve n KB of headroom |
1380 | * @strategy: packet buffer allocation strategy |
1381 | **/ |
1382 | static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, |
1383 | u32 headroom, int strategy) |
1384 | { |
1385 | u32 rxpktsize = IXGBE_RXPBSIZE_64KB; |
1386 | u8 i = 0; |
1387 | UNREFERENCED_1PARAMETER(headroom); |
1388 | |
1389 | if (!num_pb) |
1390 | return; |
1391 | |
1392 | /* Setup Rx packet buffer sizes */ |
1393 | switch (strategy) { |
1394 | case PBA_STRATEGY_WEIGHTED: |
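		/*
		 * Weighted split: 4 x 80 KB + 4 x 48 KB = 512 KB, which
		 * matches the 82598's total Rx packet buffer size.
		 */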
1395 | /* Setup the first four at 80KB */ |
1396 | rxpktsize = IXGBE_RXPBSIZE_80KB; |
1397 | for (; i < 4; i++) |
1398 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); |
1399 | /* Setup the last four at 48KB...don't re-init i */ |
1400 | rxpktsize = IXGBE_RXPBSIZE_48KB; |
1401 | /* Fall Through */ |
1402 | case PBA_STRATEGY_EQUAL: |
1403 | default: |
1404 | /* Divide the remaining Rx packet buffer evenly among the TCs */ |
1405 | for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) |
1406 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); |
1407 | break; |
1408 | } |
1409 | |
1410 | /* Setup Tx packet buffer sizes */ |
1411 | for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) |
1412 | IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); |
1413 | |
1414 | return; |
1415 | } |
1416 | |