1/******************************************************************************
2
3 Copyright (c) 2001-2013, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 251964 2013-06-18 21:28:19Z jfv $*/
34/*$NetBSD: ixgbe_common.c,v 1.7 2016/02/06 02:40:49 riastradh Exp $*/
35
36#include "ixgbe_common.h"
37#include "ixgbe_phy.h"
38#include "ixgbe_dcb.h"
39#include "ixgbe_dcb_82599.h"
40#include "ixgbe_api.h"
41
42static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
43static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
44static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
45static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
46static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
47static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
48 u16 count);
49static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
50static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
52static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
53
54static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
55static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
56 u16 *san_mac_offset);
57static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
60 u16 words, u16 *data);
61static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
62 u16 offset);
63
64/**
65 * ixgbe_init_ops_generic - Inits function ptrs
66 * @hw: pointer to the hardware structure
67 *
68 * Initialize the function pointers.
69 **/
70s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
71{
72 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
73 struct ixgbe_mac_info *mac = &hw->mac;
74 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
75
76 DEBUGFUNC("ixgbe_init_ops_generic");
77
78 /* EEPROM */
79 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
80 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
81 if (eec & IXGBE_EEC_PRES) {
82 eeprom->ops.read = &ixgbe_read_eerd_generic;
83 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
84 } else {
85 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
86 eeprom->ops.read_buffer =
87 &ixgbe_read_eeprom_buffer_bit_bang_generic;
88 }
89 eeprom->ops.write = &ixgbe_write_eeprom_generic;
90 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
91 eeprom->ops.validate_checksum =
92 &ixgbe_validate_eeprom_checksum_generic;
93 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
94 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
95
96 /* MAC */
97 mac->ops.init_hw = &ixgbe_init_hw_generic;
98 mac->ops.reset_hw = NULL;
99 mac->ops.start_hw = &ixgbe_start_hw_generic;
100 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
101 mac->ops.get_media_type = NULL;
102 mac->ops.get_supported_physical_layer = NULL;
103 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
104 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
105 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
106 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
107 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
108 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
109 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
110
111 /* LEDs */
112 mac->ops.led_on = &ixgbe_led_on_generic;
113 mac->ops.led_off = &ixgbe_led_off_generic;
114 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
115 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
116
117 /* RAR, Multicast, VLAN */
118 mac->ops.set_rar = &ixgbe_set_rar_generic;
119 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
120 mac->ops.insert_mac_addr = NULL;
121 mac->ops.set_vmdq = NULL;
122 mac->ops.clear_vmdq = NULL;
123 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
124 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
125 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
126 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
127 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
128 mac->ops.clear_vfta = NULL;
129 mac->ops.set_vfta = NULL;
130 mac->ops.set_vlvf = NULL;
131 mac->ops.init_uta_tables = NULL;
132
133 /* Flow Control */
134 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
135
136 /* Link */
137 mac->ops.get_link_capabilities = NULL;
138 mac->ops.setup_link = NULL;
139 mac->ops.check_link = NULL;
140 mac->ops.dmac_config = NULL;
141 mac->ops.dmac_update_tcs = NULL;
142 mac->ops.dmac_config_tcs = NULL;
143
144 return IXGBE_SUCCESS;
145}
146
147/**
148 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
149 * of flow control
150 * @hw: pointer to hardware structure
151 *
152 * This function returns TRUE if the device supports flow control
153 * autonegotiation, and FALSE if it does not.
154 *
155 **/
156bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
157{
158 bool supported = FALSE;
159 ixgbe_link_speed speed;
160 bool link_up;
161
162 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
163
164 switch (hw->phy.media_type) {
165 case ixgbe_media_type_fiber_fixed:
166 case ixgbe_media_type_fiber:
167 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
168 /* if link is down, assume supported */
169 if (link_up)
170 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
171 TRUE : FALSE;
172 else
173 supported = TRUE;
174 break;
175 case ixgbe_media_type_backplane:
176 supported = TRUE;
177 break;
178 case ixgbe_media_type_copper:
179 /* only some copper devices support flow control autoneg */
180 switch (hw->device_id) {
181 case IXGBE_DEV_ID_82599_T3_LOM:
182 case IXGBE_DEV_ID_X540T:
183 case IXGBE_DEV_ID_X540_BYPASS:
184 supported = TRUE;
185 break;
186 default:
187 supported = FALSE;
188 }
189 default:
190 break;
191 }
192
193 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
194 "Device %x does not support flow control autoneg",
195 hw->device_id);
196 return supported;
197}
198
/**
 * ixgbe_setup_fc - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.  Validates the requested
 * flow-control mode, programs the 1G (PCS1GANA) and 10G (AUTOC) pause
 * advertisement bits — or the PHY advertisement register for copper —
 * and restarts autonegotiation so the new advertisement takes effect.
 *
 * Returns IXGBE_SUCCESS on success, IXGBE_ERR_INVALID_LINK_SETTINGS for
 * rx_pause in strict IEEE mode, IXGBE_ERR_CONFIG for an invalid mode,
 * or IXGBE_ERR_SWFW_SYNC if the SW/FW semaphore cannot be taken.
 **/
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;	/* PCS1GANA / AUTOC shadow copies */
	u16 reg_cu = 0;			/* copper PHY AN advertisement word */
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_setup_fc");

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 * Copper parts advertise via the PHY instead.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_backplane:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.  ASM without SYM means
		 * "asymmetric toward the link partner" per 802.3 Annex 28B.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* NOTREACHED */
	}

	if (hw->mac.type != ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on, likewise reset_pipeline requires the lock as
		 * it also writes AUTOC.
		 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}
			got_lock = TRUE;
		}

		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
		if (hw->mac.type == ixgbe_mac_82599EB)
			ixgbe_reset_pipeline_82599(hw);

		/* Release the semaphore only if we actually took it above */
		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   (ixgbe_device_supports_autoneg_fc(hw))) {
		/* Copper: write the updated advertisement back to the PHY */
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
	return ret_val;
}
369
370/**
371 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
372 * @hw: pointer to hardware structure
373 *
374 * Starts the hardware by filling the bus info structure and media type, clears
375 * all on chip counters, initializes receive address registers, multicast
376 * table, VLAN filter table, calls routine to set up link and flow control
377 * settings, and leaves transmit and receive units disabled and uninitialized
378 **/
379s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
380{
381 s32 ret_val;
382 u32 ctrl_ext;
383
384 DEBUGFUNC("ixgbe_start_hw_generic");
385
386 /* Set the media type */
387 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
388
389 /* PHY ops initialization must be done in reset_hw() */
390
391 /* Clear the VLAN filter table */
392 hw->mac.ops.clear_vfta(hw);
393
394 /* Clear statistics registers */
395 hw->mac.ops.clear_hw_cntrs(hw);
396
397 /* Set No Snoop Disable */
398 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
399 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
400 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
401 IXGBE_WRITE_FLUSH(hw);
402
403 /* Setup flow control */
404 ret_val = ixgbe_setup_fc(hw);
405 if (ret_val != IXGBE_SUCCESS)
406 goto out;
407
408 /* Clear adapter stopped flag */
409 hw->adapter_stopped = FALSE;
410
411out:
412 return ret_val;
413}
414
415/**
416 * ixgbe_start_hw_gen2 - Init sequence for common device family
417 * @hw: pointer to hw structure
418 *
419 * Performs the init sequence common to the second generation
420 * of 10 GbE devices.
421 * Devices in the second generation:
422 * 82599
423 * X540
424 **/
425s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
426{
427 u32 i;
428 u32 regval;
429
430 /* Clear the rate limiters */
431 for (i = 0; i < hw->mac.max_tx_queues; i++) {
432 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
433 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
434 }
435 IXGBE_WRITE_FLUSH(hw);
436
437 /* Disable relaxed ordering */
438 for (i = 0; i < hw->mac.max_tx_queues; i++) {
439 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
440 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
441 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
442 }
443
444 for (i = 0; i < hw->mac.max_rx_queues; i++) {
445 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
446 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
447 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
448 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
449 }
450
451 return IXGBE_SUCCESS;
452}
453
454/**
455 * ixgbe_init_hw_generic - Generic hardware initialization
456 * @hw: pointer to hardware structure
457 *
458 * Initialize the hardware by resetting the hardware, filling the bus info
459 * structure and media type, clears all on chip counters, initializes receive
460 * address registers, multicast table, VLAN filter table, calls routine to set
461 * up link and flow control settings, and leaves transmit and receive units
462 * disabled and uninitialized
463 **/
464s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
465{
466 s32 status;
467
468 DEBUGFUNC("ixgbe_init_hw_generic");
469
470 /* Reset the hardware */
471 status = hw->mac.ops.reset_hw(hw);
472
473 if (status == IXGBE_SUCCESS) {
474 /* Start the HW */
475 status = hw->mac.ops.start_hw(hw);
476 }
477
478 return status;
479}
480
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware;
 * statistics counters are clear-on-read, so the read values are discarded.
 * Several counters moved to new register addresses on 82599 and later,
 * hence the mac.type checks below.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Link-level XON/XOFF counters; Rx counters moved on 82599+ */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (per-TC) XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));

	/* Rx size-bucketed packet counters and byte/packet totals */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);

	/* RNBC (receive no buffer) counters only exist on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));

	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);

	/* Tx size-bucketed packet counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Per-queue counters; 64-bit split registers on 82599+ */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps additional PHY-side error counters; read them via
	 * MDIO to clear them as well (values discarded into i). */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
594
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.  Handles both the
 * legacy two-word hex-encoded format and the newer string format
 * (signalled by the IXGBE_PBANUM_PTR_GUARD word).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_ARGUMENT for a NULL buffer,
 * IXGBE_ERR_NO_SPACE when the buffer is too small, IXGBE_ERR_PBA_SECTION
 * for a corrupt section, or the underlying EEPROM read error.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* First PBA word: either raw hex digits or the string-format guard */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/* Second PBA word: more hex digits (legacy) or a pointer (string) */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA:
		 * 6 digits + '-' + 3 digits + NUL (format "XXXXXX-0XX") */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr, one nibble per
		 * output byte (converted to ASCII below) */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* fixed 0 nibble; the conversion loop turns it into '0' */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char; '-' at
		 * offset 6 is 0x2D >= 0x10 so it is left untouched */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* String format: pba_ptr points at a length-prefixed word section */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough: two chars per data word
	 * (length includes the length word itself, removed below) */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, big-endian */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
705
706/**
707 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
708 * @hw: pointer to hardware structure
709 * @pba_num: stores the part number from the EEPROM
710 *
711 * Reads the part number from the EEPROM.
712 **/
713s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
714{
715 s32 ret_val;
716 u16 data;
717
718 DEBUGFUNC("ixgbe_read_pba_num_generic");
719
720 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
721 if (ret_val) {
722 DEBUGOUT("NVM Read Error\n");
723 return ret_val;
724 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
725 DEBUGOUT("NVM Not supported\n");
726 return IXGBE_NOT_IMPLEMENTED;
727 }
728 *pba_num = (u32)(data << 16);
729
730 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
731 if (ret_val) {
732 DEBUGOUT("NVM Read Error\n");
733 return ret_val;
734 }
735 *pba_num |= data;
736
737 return IXGBE_SUCCESS;
738}
739
/**
 * ixgbe_read_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @max_pba_block_size: PBA block size limit
 * @pba: pointer to output PBA structure
 *
 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 * Fills pba->word[] with the two PBA header words; when they indicate
 * string format, also copies the length-prefixed PBA block into
 * pba->pba_block (which the caller must have allocated).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/undersized inputs,
 * or the underlying EEPROM read error.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two PBA header words from device or image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word means string format: word[1] points at the PBA block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		/* Determine the block size before copying, and refuse
		 * blocks larger than the caller's buffer allows */
		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check against the image before memcpy */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
809
/**
 * ixgbe_write_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba: pointer to PBA structure
 *
 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 * Mirror of ixgbe_read_pba_raw: writes the two header words and, for
 * string-format PBAs, the length-prefixed block (pba->pba_block[0] is
 * the block length in words).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/undersized inputs,
 * or the underlying EEPROM write error.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Write the two PBA header words to device or image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word means string format: also write the PBA block that
	 * word[1] points at */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check against the image before memcpy */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
867
868/**
869 * ixgbe_get_pba_block_size
870 * @hw: pointer to the HW structure
871 * @eeprom_buf: optional pointer to EEPROM image
872 * @eeprom_buf_size: size of EEPROM image in words
873 * @pba_data_size: pointer to output variable
874 *
875 * Returns the size of the PBA block in words. Function operates on EEPROM
876 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
877 * EEPROM device.
878 *
879 **/
880s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
881 u32 eeprom_buf_size, u16 *pba_block_size)
882{
883 s32 ret_val;
884 u16 pba_word[2];
885 u16 length;
886
887 DEBUGFUNC("ixgbe_get_pba_block_size");
888
889 if (eeprom_buf == NULL) {
890 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
891 &pba_word[0]);
892 if (ret_val)
893 return ret_val;
894 } else {
895 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
896 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
897 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
898 } else {
899 return IXGBE_ERR_PARAM;
900 }
901 }
902
903 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
904 if (eeprom_buf == NULL) {
905 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
906 &length);
907 if (ret_val)
908 return ret_val;
909 } else {
910 if (eeprom_buf_size > pba_word[1])
911 length = eeprom_buf[pba_word[1] + 0];
912 else
913 return IXGBE_ERR_PARAM;
914 }
915
916 if (length == 0xFFFF || length == 0)
917 return IXGBE_ERR_PBA_SECTION;
918 } else {
919 /* PBA number in legacy format, there is no PBA Block. */
920 length = 0;
921 }
922
923 if (pba_block_size != NULL)
924 *pba_block_size = length;
925
926 return IXGBE_SUCCESS;
927}
928
929/**
930 * ixgbe_get_mac_addr_generic - Generic get MAC address
931 * @hw: pointer to hardware structure
932 * @mac_addr: Adapter MAC address
933 *
934 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
935 * A reset of the adapter must be performed prior to calling this function
936 * in order for the MAC address to have been loaded from the EEPROM into RAR0
937 **/
938s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
939{
940 u32 rar_high;
941 u32 rar_low;
942 u16 i;
943
944 DEBUGFUNC("ixgbe_get_mac_addr_generic");
945
946 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
947 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
948
949 for (i = 0; i < 4; i++)
950 mac_addr[i] = (u8)(rar_low >> (i*8));
951
952 for (i = 0; i < 2; i++)
953 mac_addr[i+4] = (u8)(rar_high >> (i*8));
954
955 return IXGBE_SUCCESS;
956}
957
958/**
959 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
960 * @hw: pointer to hardware structure
961 * @link_status: the link status returned by the PCI config space
962 *
963 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
964 **/
965void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
966{
967 struct ixgbe_mac_info *mac = &hw->mac;
968
969 hw->bus.type = ixgbe_bus_type_pci_express;
970
971 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
972 case IXGBE_PCI_LINK_WIDTH_1:
973 hw->bus.width = ixgbe_bus_width_pcie_x1;
974 break;
975 case IXGBE_PCI_LINK_WIDTH_2:
976 hw->bus.width = ixgbe_bus_width_pcie_x2;
977 break;
978 case IXGBE_PCI_LINK_WIDTH_4:
979 hw->bus.width = ixgbe_bus_width_pcie_x4;
980 break;
981 case IXGBE_PCI_LINK_WIDTH_8:
982 hw->bus.width = ixgbe_bus_width_pcie_x8;
983 break;
984 default:
985 hw->bus.width = ixgbe_bus_width_unknown;
986 break;
987 }
988
989 switch (link_status & IXGBE_PCI_LINK_SPEED) {
990 case IXGBE_PCI_LINK_SPEED_2500:
991 hw->bus.speed = ixgbe_bus_speed_2500;
992 break;
993 case IXGBE_PCI_LINK_SPEED_5000:
994 hw->bus.speed = ixgbe_bus_speed_5000;
995 break;
996 case IXGBE_PCI_LINK_SPEED_8000:
997 hw->bus.speed = ixgbe_bus_speed_8000;
998 break;
999 default:
1000 hw->bus.speed = ixgbe_bus_speed_unknown;
1001 break;
1002 }
1003
1004 mac->ops.set_lan_id(hw);
1005}
1006
1007/**
1008 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1009 * @hw: pointer to hardware structure
1010 *
1011 * Gets the PCI bus info (speed, width, type) then calls helper function to
1012 * store this data within the ixgbe_hw structure.
1013 **/
1014s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1015{
1016 u16 link_status;
1017
1018 DEBUGFUNC("ixgbe_get_bus_info_generic");
1019
1020 /* Get the negotiated link width and speed from PCI config space */
1021 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1022
1023 ixgbe_set_pci_config_data_generic(hw, link_status);
1024
1025 return IXGBE_SUCCESS;
1026}
1027
1028/**
1029 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1030 * @hw: pointer to the HW structure
1031 *
1032 * Determines the LAN function id by reading memory-mapped registers
1033 * and swaps the port value if requested.
1034 **/
1035void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1036{
1037 struct ixgbe_bus_info *bus = &hw->bus;
1038 u32 reg;
1039
1040 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1041
1042 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1043 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1044 bus->lan_id = bus->func;
1045
1046 /* check for a port swap */
1047 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1048 if (reg & IXGBE_FACTPS_LFS)
1049 bus->func ^= 0x1;
1050}
1051
1052/**
1053 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1054 * @hw: pointer to hardware structure
1055 *
1056 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1057 * disables transmit and receive units. The adapter_stopped flag is used by
1058 * the shared code and drivers to determine if the adapter is in a stopped
1059 * state and should not touch the hardware.
1060 **/
1061s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1062{
1063 u32 reg_val;
1064 u16 i;
1065
1066 DEBUGFUNC("ixgbe_stop_adapter_generic");
1067
1068 /*
1069 * Set the adapter_stopped flag so other driver functions stop touching
1070 * the hardware
1071 */
1072 hw->adapter_stopped = TRUE;
1073
1074 /* Disable the receive unit */
1075 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
1076
1077 /* Clear interrupt mask to stop interrupts from being generated */
1078 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1079
1080 /* Clear any pending interrupts, flush previous writes */
1081 IXGBE_READ_REG(hw, IXGBE_EICR);
1082
1083 /* Disable the transmit unit. Each queue must be disabled. */
1084 for (i = 0; i < hw->mac.max_tx_queues; i++)
1085 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1086
1087 /* Disable the receive unit by stopping each queue */
1088 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1089 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1090 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1091 reg_val |= IXGBE_RXDCTL_SWFLSH;
1092 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1093 }
1094
1095 /* flush all queues disables */
1096 IXGBE_WRITE_FLUSH(hw);
1097 msec_delay(2);
1098
1099 /*
1100 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1101 * access and verify no pending requests
1102 */
1103 return ixgbe_disable_pcie_master(hw);
1104}
1105
1106/**
1107 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1108 * @hw: pointer to hardware structure
1109 * @index: led number to turn on
1110 **/
1111s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1112{
1113 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1114
1115 DEBUGFUNC("ixgbe_led_on_generic");
1116
1117 /* To turn on the LED, set mode to ON. */
1118 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1119 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1120 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1121 IXGBE_WRITE_FLUSH(hw);
1122
1123 return IXGBE_SUCCESS;
1124}
1125
1126/**
1127 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1128 * @hw: pointer to hardware structure
1129 * @index: led number to turn off
1130 **/
1131s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1132{
1133 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1134
1135 DEBUGFUNC("ixgbe_led_off_generic");
1136
1137 /* To turn off the LED, set mode to OFF. */
1138 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1139 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1140 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1141 IXGBE_WRITE_FLUSH(hw);
1142
1143 return IXGBE_SUCCESS;
1144}
1145
1146/**
1147 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1148 * @hw: pointer to hardware structure
1149 *
1150 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1151 * ixgbe_hw struct in order to set up EEPROM access.
1152 **/
1153s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1154{
1155 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1156 u32 eec;
1157 u16 eeprom_size;
1158
1159 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1160
1161 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1162 eeprom->type = ixgbe_eeprom_none;
1163 /* Set default semaphore delay to 10ms which is a well
1164 * tested value */
1165 eeprom->semaphore_delay = 10;
1166 /* Clear EEPROM page size, it will be initialized as needed */
1167 eeprom->word_page_size = 0;
1168
1169 /*
1170 * Check for EEPROM present first.
1171 * If not present leave as none
1172 */
1173 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1174 if (eec & IXGBE_EEC_PRES) {
1175 eeprom->type = ixgbe_eeprom_spi;
1176
1177 /*
1178 * SPI EEPROM is assumed here. This code would need to
1179 * change if a future EEPROM is not SPI.
1180 */
1181 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1182 IXGBE_EEC_SIZE_SHIFT);
1183 eeprom->word_size = 1 << (eeprom_size +
1184 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1185 }
1186
1187 if (eec & IXGBE_EEC_ADDR_SIZE)
1188 eeprom->address_bits = 16;
1189 else
1190 eeprom->address_bits = 8;
1191 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1192 "%d\n", eeprom->type, eeprom->word_size,
1193 eeprom->address_bits);
1194 }
1195
1196 return IXGBE_SUCCESS;
1197}
1198
1199/**
1200 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1201 * @hw: pointer to hardware structure
1202 * @offset: offset within the EEPROM to write
1203 * @words: number of word(s)
1204 * @data: 16 bit word(s) to write to EEPROM
1205 *
1206 * Reads 16 bit word(s) from EEPROM through bit-bang method
1207 **/
1208s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1209 u16 words, u16 *data)
1210{
1211 s32 status = IXGBE_SUCCESS;
1212 u16 i, count;
1213
1214 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1215
1216 hw->eeprom.ops.init_params(hw);
1217
1218 if (words == 0) {
1219 status = IXGBE_ERR_INVALID_ARGUMENT;
1220 goto out;
1221 }
1222
1223 if (offset + words > hw->eeprom.word_size) {
1224 status = IXGBE_ERR_EEPROM;
1225 goto out;
1226 }
1227
1228 /*
1229 * The EEPROM page size cannot be queried from the chip. We do lazy
1230 * initialization. It is worth to do that when we write large buffer.
1231 */
1232 if ((hw->eeprom.word_page_size == 0) &&
1233 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1234 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1235
1236 /*
1237 * We cannot hold synchronization semaphores for too long
1238 * to avoid other entity starvation. However it is more efficient
1239 * to read in bursts than synchronizing access for each word.
1240 */
1241 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1242 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1243 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1244 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1245 count, &data[i]);
1246
1247 if (status != IXGBE_SUCCESS)
1248 break;
1249 }
1250
1251out:
1252 return status;
1253}
1254
1255/**
1256 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1257 * @hw: pointer to hardware structure
1258 * @offset: offset within the EEPROM to be written to
1259 * @words: number of word(s)
1260 * @data: 16 bit word(s) to be written to the EEPROM
1261 *
1262 * If ixgbe_eeprom_update_checksum is not called after this function, the
1263 * EEPROM will most likely contain an invalid checksum.
1264 **/
1265static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1266 u16 words, u16 *data)
1267{
1268 s32 status;
1269 u16 word;
1270 u16 page_size;
1271 u16 i;
1272 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1273
1274 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1275
1276 /* Prepare the EEPROM for writing */
1277 status = ixgbe_acquire_eeprom(hw);
1278
1279 if (status == IXGBE_SUCCESS) {
1280 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1281 ixgbe_release_eeprom(hw);
1282 status = IXGBE_ERR_EEPROM;
1283 }
1284 }
1285
1286 if (status == IXGBE_SUCCESS) {
1287 for (i = 0; i < words; i++) {
1288 ixgbe_standby_eeprom(hw);
1289
1290 /* Send the WRITE ENABLE command (8 bit opcode ) */
1291 ixgbe_shift_out_eeprom_bits(hw,
1292 IXGBE_EEPROM_WREN_OPCODE_SPI,
1293 IXGBE_EEPROM_OPCODE_BITS);
1294
1295 ixgbe_standby_eeprom(hw);
1296
1297 /*
1298 * Some SPI eeproms use the 8th address bit embedded
1299 * in the opcode
1300 */
1301 if ((hw->eeprom.address_bits == 8) &&
1302 ((offset + i) >= 128))
1303 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1304
1305 /* Send the Write command (8-bit opcode + addr) */
1306 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1307 IXGBE_EEPROM_OPCODE_BITS);
1308 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1309 hw->eeprom.address_bits);
1310
1311 page_size = hw->eeprom.word_page_size;
1312
1313 /* Send the data in burst via SPI*/
1314 do {
1315 word = data[i];
1316 word = (word >> 8) | (word << 8);
1317 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1318
1319 if (page_size == 0)
1320 break;
1321
1322 /* do not wrap around page */
1323 if (((offset + i) & (page_size - 1)) ==
1324 (page_size - 1))
1325 break;
1326 } while (++i < words);
1327
1328 ixgbe_standby_eeprom(hw);
1329 msec_delay(10);
1330 }
1331 /* Done with writing - release the EEPROM */
1332 ixgbe_release_eeprom(hw);
1333 }
1334
1335 return status;
1336}
1337
1338/**
1339 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1340 * @hw: pointer to hardware structure
1341 * @offset: offset within the EEPROM to be written to
1342 * @data: 16 bit word to be written to the EEPROM
1343 *
1344 * If ixgbe_eeprom_update_checksum is not called after this function, the
1345 * EEPROM will most likely contain an invalid checksum.
1346 **/
1347s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1348{
1349 s32 status;
1350
1351 DEBUGFUNC("ixgbe_write_eeprom_generic");
1352
1353 hw->eeprom.ops.init_params(hw);
1354
1355 if (offset >= hw->eeprom.word_size) {
1356 status = IXGBE_ERR_EEPROM;
1357 goto out;
1358 }
1359
1360 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1361
1362out:
1363 return status;
1364}
1365
1366/**
1367 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1368 * @hw: pointer to hardware structure
1369 * @offset: offset within the EEPROM to be read
1370 * @data: read 16 bit words(s) from EEPROM
1371 * @words: number of word(s)
1372 *
1373 * Reads 16 bit word(s) from EEPROM through bit-bang method
1374 **/
1375s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1376 u16 words, u16 *data)
1377{
1378 s32 status = IXGBE_SUCCESS;
1379 u16 i, count;
1380
1381 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1382
1383 hw->eeprom.ops.init_params(hw);
1384
1385 if (words == 0) {
1386 status = IXGBE_ERR_INVALID_ARGUMENT;
1387 goto out;
1388 }
1389
1390 if (offset + words > hw->eeprom.word_size) {
1391 status = IXGBE_ERR_EEPROM;
1392 goto out;
1393 }
1394
1395 /*
1396 * We cannot hold synchronization semaphores for too long
1397 * to avoid other entity starvation. However it is more efficient
1398 * to read in bursts than synchronizing access for each word.
1399 */
1400 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1401 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1402 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1403
1404 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1405 count, &data[i]);
1406
1407 if (status != IXGBE_SUCCESS)
1408 break;
1409 }
1410
1411out:
1412 return status;
1413}
1414
1415/**
1416 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1417 * @hw: pointer to hardware structure
1418 * @offset: offset within the EEPROM to be read
1419 * @words: number of word(s)
1420 * @data: read 16 bit word(s) from EEPROM
1421 *
1422 * Reads 16 bit word(s) from EEPROM through bit-bang method
1423 **/
1424static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1425 u16 words, u16 *data)
1426{
1427 s32 status;
1428 u16 word_in;
1429 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1430 u16 i;
1431
1432 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1433
1434 /* Prepare the EEPROM for reading */
1435 status = ixgbe_acquire_eeprom(hw);
1436
1437 if (status == IXGBE_SUCCESS) {
1438 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1439 ixgbe_release_eeprom(hw);
1440 status = IXGBE_ERR_EEPROM;
1441 }
1442 }
1443
1444 if (status == IXGBE_SUCCESS) {
1445 for (i = 0; i < words; i++) {
1446 ixgbe_standby_eeprom(hw);
1447 /*
1448 * Some SPI eeproms use the 8th address bit embedded
1449 * in the opcode
1450 */
1451 if ((hw->eeprom.address_bits == 8) &&
1452 ((offset + i) >= 128))
1453 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1454
1455 /* Send the READ command (opcode + addr) */
1456 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1457 IXGBE_EEPROM_OPCODE_BITS);
1458 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1459 hw->eeprom.address_bits);
1460
1461 /* Read the data. */
1462 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1463 data[i] = (word_in >> 8) | (word_in << 8);
1464 }
1465
1466 /* End this read operation */
1467 ixgbe_release_eeprom(hw);
1468 }
1469
1470 return status;
1471}
1472
1473/**
1474 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1475 * @hw: pointer to hardware structure
1476 * @offset: offset within the EEPROM to be read
1477 * @data: read 16 bit value from EEPROM
1478 *
1479 * Reads 16 bit value from EEPROM through bit-bang method
1480 **/
1481s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1482 u16 *data)
1483{
1484 s32 status;
1485
1486 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1487
1488 hw->eeprom.ops.init_params(hw);
1489
1490 if (offset >= hw->eeprom.word_size) {
1491 status = IXGBE_ERR_EEPROM;
1492 goto out;
1493 }
1494
1495 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1496
1497out:
1498 return status;
1499}
1500
1501/**
1502 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1503 * @hw: pointer to hardware structure
1504 * @offset: offset of word in the EEPROM to read
1505 * @words: number of word(s)
1506 * @data: 16 bit word(s) from the EEPROM
1507 *
1508 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1509 **/
1510s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1511 u16 words, u16 *data)
1512{
1513 u32 eerd;
1514 s32 status = IXGBE_SUCCESS;
1515 u32 i;
1516
1517 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1518
1519 hw->eeprom.ops.init_params(hw);
1520
1521 if (words == 0) {
1522 status = IXGBE_ERR_INVALID_ARGUMENT;
1523 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1524 goto out;
1525 }
1526
1527 if (offset >= hw->eeprom.word_size) {
1528 status = IXGBE_ERR_EEPROM;
1529 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1530 goto out;
1531 }
1532
1533 for (i = 0; i < words; i++) {
1534 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1535 IXGBE_EEPROM_RW_REG_START;
1536
1537 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1538 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1539
1540 if (status == IXGBE_SUCCESS) {
1541 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1542 IXGBE_EEPROM_RW_REG_DATA);
1543 } else {
1544 DEBUGOUT("Eeprom read timed out\n");
1545 goto out;
1546 }
1547 }
1548out:
1549 return status;
1550}
1551
1552/**
1553 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1554 * @hw: pointer to hardware structure
1555 * @offset: offset within the EEPROM to be used as a scratch pad
1556 *
1557 * Discover EEPROM page size by writing marching data at given offset.
1558 * This function is called only when we are writing a new large buffer
1559 * at given offset so the data would be overwritten anyway.
1560 **/
1561static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1562 u16 offset)
1563{
1564 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1565 s32 status = IXGBE_SUCCESS;
1566 u16 i;
1567
1568 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1569
1570 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1571 data[i] = i;
1572
1573 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1574 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1575 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1576 hw->eeprom.word_page_size = 0;
1577 if (status != IXGBE_SUCCESS)
1578 goto out;
1579
1580 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1581 if (status != IXGBE_SUCCESS)
1582 goto out;
1583
1584 /*
1585 * When writing in burst more than the actual page size
1586 * EEPROM address wraps around current page.
1587 */
1588 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1589
1590 DEBUGOUT1("Detected EEPROM page size = %d words.",
1591 hw->eeprom.word_page_size);
1592out:
1593 return status;
1594}
1595
1596/**
1597 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1598 * @hw: pointer to hardware structure
1599 * @offset: offset of word in the EEPROM to read
1600 * @data: word read from the EEPROM
1601 *
1602 * Reads a 16 bit word from the EEPROM using the EERD register.
1603 **/
1604s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1605{
1606 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1607}
1608
1609/**
1610 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1611 * @hw: pointer to hardware structure
1612 * @offset: offset of word in the EEPROM to write
1613 * @words: number of word(s)
1614 * @data: word(s) write to the EEPROM
1615 *
1616 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1617 **/
1618s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1619 u16 words, u16 *data)
1620{
1621 u32 eewr;
1622 s32 status = IXGBE_SUCCESS;
1623 u16 i;
1624
1625 DEBUGFUNC("ixgbe_write_eewr_generic");
1626
1627 hw->eeprom.ops.init_params(hw);
1628
1629 if (words == 0) {
1630 status = IXGBE_ERR_INVALID_ARGUMENT;
1631 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1632 goto out;
1633 }
1634
1635 if (offset >= hw->eeprom.word_size) {
1636 status = IXGBE_ERR_EEPROM;
1637 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1638 goto out;
1639 }
1640
1641 for (i = 0; i < words; i++) {
1642 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1643 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1644 IXGBE_EEPROM_RW_REG_START;
1645
1646 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1647 if (status != IXGBE_SUCCESS) {
1648 DEBUGOUT("Eeprom write EEWR timed out\n");
1649 goto out;
1650 }
1651
1652 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1653
1654 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1655 if (status != IXGBE_SUCCESS) {
1656 DEBUGOUT("Eeprom write EEWR timed out\n");
1657 goto out;
1658 }
1659 }
1660
1661out:
1662 return status;
1663}
1664
1665/**
1666 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1667 * @hw: pointer to hardware structure
1668 * @offset: offset of word in the EEPROM to write
1669 * @data: word write to the EEPROM
1670 *
1671 * Write a 16 bit word to the EEPROM using the EEWR register.
1672 **/
1673s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1674{
1675 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1676}
1677
1678/**
1679 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1680 * @hw: pointer to hardware structure
1681 * @ee_reg: EEPROM flag for polling
1682 *
1683 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1684 * read or write is done respectively.
1685 **/
1686s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1687{
1688 u32 i;
1689 u32 reg;
1690 s32 status = IXGBE_ERR_EEPROM;
1691
1692 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1693
1694 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1695 if (ee_reg == IXGBE_NVM_POLL_READ)
1696 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1697 else
1698 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1699
1700 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1701 status = IXGBE_SUCCESS;
1702 break;
1703 }
1704 usec_delay(5);
1705 }
1706
1707 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1708 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1709 "EEPROM read/write done polling timed out");
1710
1711 return status;
1712}
1713
1714/**
1715 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1716 * @hw: pointer to hardware structure
1717 *
1718 * Prepares EEPROM for access using bit-bang method. This function should
1719 * be called before issuing a command to the EEPROM.
1720 **/
1721static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1722{
1723 s32 status = IXGBE_SUCCESS;
1724 u32 eec;
1725 u32 i;
1726
1727 DEBUGFUNC("ixgbe_acquire_eeprom");
1728
1729 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1730 != IXGBE_SUCCESS)
1731 status = IXGBE_ERR_SWFW_SYNC;
1732
1733 if (status == IXGBE_SUCCESS) {
1734 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1735
1736 /* Request EEPROM Access */
1737 eec |= IXGBE_EEC_REQ;
1738 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1739
1740 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1741 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1742 if (eec & IXGBE_EEC_GNT)
1743 break;
1744 usec_delay(5);
1745 }
1746
1747 /* Release if grant not acquired */
1748 if (!(eec & IXGBE_EEC_GNT)) {
1749 eec &= ~IXGBE_EEC_REQ;
1750 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1751 DEBUGOUT("Could not acquire EEPROM grant\n");
1752
1753 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1754 status = IXGBE_ERR_EEPROM;
1755 }
1756
1757 /* Setup EEPROM for Read/Write */
1758 if (status == IXGBE_SUCCESS) {
1759 /* Clear CS and SK */
1760 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1761 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1762 IXGBE_WRITE_FLUSH(hw);
1763 usec_delay(1);
1764 }
1765 }
1766 return status;
1767}
1768
1769/**
1770 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1771 * @hw: pointer to hardware structure
1772 *
1773 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1774 **/
1775static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1776{
1777 s32 status = IXGBE_ERR_EEPROM;
1778 u32 timeout = 2000;
1779 u32 i;
1780 u32 swsm;
1781
1782 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1783
1784
1785 /* Get SMBI software semaphore between device drivers first */
1786 for (i = 0; i < timeout; i++) {
1787 /*
1788 * If the SMBI bit is 0 when we read it, then the bit will be
1789 * set and we have the semaphore
1790 */
1791 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1792 if (!(swsm & IXGBE_SWSM_SMBI)) {
1793 status = IXGBE_SUCCESS;
1794 break;
1795 }
1796 usec_delay(50);
1797 }
1798
1799 if (i == timeout) {
1800 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1801 "not granted.\n");
1802 /*
1803 * this release is particularly important because our attempts
1804 * above to get the semaphore may have succeeded, and if there
1805 * was a timeout, we should unconditionally clear the semaphore
1806 * bits to free the driver to make progress
1807 */
1808 ixgbe_release_eeprom_semaphore(hw);
1809
1810 usec_delay(50);
1811 /*
1812 * one last try
1813 * If the SMBI bit is 0 when we read it, then the bit will be
1814 * set and we have the semaphore
1815 */
1816 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1817 if (!(swsm & IXGBE_SWSM_SMBI))
1818 status = IXGBE_SUCCESS;
1819 }
1820
1821 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1822 if (status == IXGBE_SUCCESS) {
1823 for (i = 0; i < timeout; i++) {
1824 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1825
1826 /* Set the SW EEPROM semaphore bit to request access */
1827 swsm |= IXGBE_SWSM_SWESMBI;
1828 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1829
1830 /*
1831 * If we set the bit successfully then we got the
1832 * semaphore.
1833 */
1834 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1835 if (swsm & IXGBE_SWSM_SWESMBI)
1836 break;
1837
1838 usec_delay(50);
1839 }
1840
1841 /*
1842 * Release semaphores and return error if SW EEPROM semaphore
1843 * was not granted because we don't have access to the EEPROM
1844 */
1845 if (i >= timeout) {
1846 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1847 "SWESMBI Software EEPROM semaphore not granted.\n");
1848 ixgbe_release_eeprom_semaphore(hw);
1849 status = IXGBE_ERR_EEPROM;
1850 }
1851 } else {
1852 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1853 "Software semaphore SMBI between device drivers "
1854 "not granted.\n");
1855 }
1856
1857 return status;
1858}
1859
1860/**
1861 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1862 * @hw: pointer to hardware structure
1863 *
1864 * This function clears hardware semaphore bits.
1865 **/
1866static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1867{
1868 u32 swsm;
1869
1870 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1871
1872 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1873
1874 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1875 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1876 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1877 IXGBE_WRITE_FLUSH(hw);
1878}
1879
1880/**
1881 * ixgbe_ready_eeprom - Polls for EEPROM ready
1882 * @hw: pointer to hardware structure
1883 **/
1884static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1885{
1886 s32 status = IXGBE_SUCCESS;
1887 u16 i;
1888 u8 spi_stat_reg;
1889
1890 DEBUGFUNC("ixgbe_ready_eeprom");
1891
1892 /*
1893 * Read "Status Register" repeatedly until the LSB is cleared. The
1894 * EEPROM will signal that the command has been completed by clearing
1895 * bit 0 of the internal status register. If it's not cleared within
1896 * 5 milliseconds, then error out.
1897 */
1898 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1899 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1900 IXGBE_EEPROM_OPCODE_BITS);
1901 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1902 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1903 break;
1904
1905 usec_delay(5);
1906 ixgbe_standby_eeprom(hw);
1907 };
1908
1909 /*
1910 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1911 * devices (and only 0-5mSec on 5V devices)
1912 */
1913 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1914 DEBUGOUT("SPI EEPROM Status error\n");
1915 status = IXGBE_ERR_EEPROM;
1916 }
1917
1918 return status;
1919}
1920
1921/**
1922 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1923 * @hw: pointer to hardware structure
1924 **/
1925static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1926{
1927 u32 eec;
1928
1929 DEBUGFUNC("ixgbe_standby_eeprom");
1930
1931 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1932
1933 /* Toggle CS to flush commands */
1934 eec |= IXGBE_EEC_CS;
1935 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1936 IXGBE_WRITE_FLUSH(hw);
1937 usec_delay(1);
1938 eec &= ~IXGBE_EEC_CS;
1939 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1940 IXGBE_WRITE_FLUSH(hw);
1941 usec_delay(1);
1942}
1943
1944/**
1945 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1946 * @hw: pointer to hardware structure
1947 * @data: data to send to the EEPROM
1948 * @count: number of bits to shift out
1949 **/
1950static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1951 u16 count)
1952{
1953 u32 eec;
1954 u32 mask;
1955 u32 i;
1956
1957 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1958
1959 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1960
1961 /*
1962 * Mask is used to shift "count" bits of "data" out to the EEPROM
1963 * one bit at a time. Determine the starting bit based on count
1964 */
1965 mask = 0x01 << (count - 1);
1966
1967 for (i = 0; i < count; i++) {
1968 /*
1969 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1970 * "1", and then raising and then lowering the clock (the SK
1971 * bit controls the clock input to the EEPROM). A "0" is
1972 * shifted out to the EEPROM by setting "DI" to "0" and then
1973 * raising and then lowering the clock.
1974 */
1975 if (data & mask)
1976 eec |= IXGBE_EEC_DI;
1977 else
1978 eec &= ~IXGBE_EEC_DI;
1979
1980 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1981 IXGBE_WRITE_FLUSH(hw);
1982
1983 usec_delay(1);
1984
1985 ixgbe_raise_eeprom_clk(hw, &eec);
1986 ixgbe_lower_eeprom_clk(hw, &eec);
1987
1988 /*
1989 * Shift mask to signify next bit of data to shift in to the
1990 * EEPROM
1991 */
1992 mask = mask >> 1;
1993 };
1994
1995 /* We leave the "DI" bit set to "0" when we leave this routine. */
1996 eec &= ~IXGBE_EEC_DI;
1997 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1998 IXGBE_WRITE_FLUSH(hw);
1999}
2000
2001/**
2002 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2003 * @hw: pointer to hardware structure
2004 **/
2005static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2006{
2007 u32 eec;
2008 u32 i;
2009 u16 data = 0;
2010
2011 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2012
2013 /*
2014 * In order to read a register from the EEPROM, we need to shift
2015 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2016 * the clock input to the EEPROM (setting the SK bit), and then reading
2017 * the value of the "DO" bit. During this "shifting in" process the
2018 * "DI" bit should always be clear.
2019 */
2020 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2021
2022 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2023
2024 for (i = 0; i < count; i++) {
2025 data = data << 1;
2026 ixgbe_raise_eeprom_clk(hw, &eec);
2027
2028 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2029
2030 eec &= ~(IXGBE_EEC_DI);
2031 if (eec & IXGBE_EEC_DO)
2032 data |= 1;
2033
2034 ixgbe_lower_eeprom_clk(hw, &eec);
2035 }
2036
2037 return data;
2038}
2039
2040/**
2041 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2042 * @hw: pointer to hardware structure
2043 * @eec: EEC register's current value
2044 **/
2045static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2046{
2047 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2048
2049 /*
2050 * Raise the clock input to the EEPROM
2051 * (setting the SK bit), then delay
2052 */
2053 *eec = *eec | IXGBE_EEC_SK;
2054 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2055 IXGBE_WRITE_FLUSH(hw);
2056 usec_delay(1);
2057}
2058
2059/**
2060 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2061 * @hw: pointer to hardware structure
2062 * @eecd: EECD's current value
2063 **/
2064static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2065{
2066 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2067
2068 /*
2069 * Lower the clock input to the EEPROM (clearing the SK bit), then
2070 * delay
2071 */
2072 *eec = *eec & ~IXGBE_EEC_SK;
2073 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2074 IXGBE_WRITE_FLUSH(hw);
2075 usec_delay(1);
2076}
2077
2078/**
2079 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2080 * @hw: pointer to hardware structure
2081 **/
2082static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2083{
2084 u32 eec;
2085
2086 DEBUGFUNC("ixgbe_release_eeprom");
2087
2088 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2089
2090 eec |= IXGBE_EEC_CS; /* Pull CS high */
2091 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2092
2093 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2094 IXGBE_WRITE_FLUSH(hw);
2095
2096 usec_delay(1);
2097
2098 /* Stop requesting EEPROM access */
2099 eec &= ~IXGBE_EEC_REQ;
2100 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2101
2102 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2103
2104 /* Delay before attempt to obtain semaphore again to allow FW access */
2105 msec_delay(hw->eeprom.semaphore_delay);
2106}
2107
2108/**
2109 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2110 * @hw: pointer to hardware structure
2111 **/
2112u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2113{
2114 u16 i;
2115 u16 j;
2116 u16 checksum = 0;
2117 u16 length = 0;
2118 u16 pointer = 0;
2119 u16 word = 0;
2120
2121 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2122
2123 /* Include 0x0-0x3F in the checksum */
2124 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2125 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
2126 DEBUGOUT("EEPROM read failed\n");
2127 break;
2128 }
2129 checksum += word;
2130 }
2131
2132 /* Include all data from pointers except for the fw pointer */
2133 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2134 hw->eeprom.ops.read(hw, i, &pointer);
2135
2136 /* Make sure the pointer seems valid */
2137 if (pointer != 0xFFFF && pointer != 0) {
2138 hw->eeprom.ops.read(hw, pointer, &length);
2139
2140 if (length != 0xFFFF && length != 0) {
2141 for (j = pointer+1; j <= pointer+length; j++) {
2142 hw->eeprom.ops.read(hw, j, &word);
2143 checksum += word;
2144 }
2145 }
2146 }
2147 }
2148
2149 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2150
2151 return checksum;
2152}
2153
2154/**
2155 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2156 * @hw: pointer to hardware structure
2157 * @checksum_val: calculated checksum
2158 *
2159 * Performs checksum calculation and validates the EEPROM checksum. If the
2160 * caller does not need checksum_val, the value can be NULL.
2161 **/
2162s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2163 u16 *checksum_val)
2164{
2165 s32 status;
2166 u16 checksum;
2167 u16 read_checksum = 0;
2168
2169 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2170
2171 /*
2172 * Read the first word from the EEPROM. If this times out or fails, do
2173 * not continue or we could be in for a very long wait while every
2174 * EEPROM read fails
2175 */
2176 status = hw->eeprom.ops.read(hw, 0, &checksum);
2177
2178 if (status == IXGBE_SUCCESS) {
2179 checksum = hw->eeprom.ops.calc_checksum(hw);
2180
2181 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2182
2183 /*
2184 * Verify read checksum from EEPROM is the same as
2185 * calculated checksum
2186 */
2187 if (read_checksum != checksum)
2188 status = IXGBE_ERR_EEPROM_CHECKSUM;
2189
2190 /* If the user cares, return the calculated checksum */
2191 if (checksum_val)
2192 *checksum_val = checksum;
2193 } else {
2194 DEBUGOUT("EEPROM read failed\n");
2195 }
2196
2197 return status;
2198}
2199
2200/**
2201 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2202 * @hw: pointer to hardware structure
2203 **/
2204s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2205{
2206 s32 status;
2207 u16 checksum;
2208
2209 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2210
2211 /*
2212 * Read the first word from the EEPROM. If this times out or fails, do
2213 * not continue or we could be in for a very long wait while every
2214 * EEPROM read fails
2215 */
2216 status = hw->eeprom.ops.read(hw, 0, &checksum);
2217
2218 if (status == IXGBE_SUCCESS) {
2219 checksum = hw->eeprom.ops.calc_checksum(hw);
2220 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2221 checksum);
2222 } else {
2223 DEBUGOUT("EEPROM read failed\n");
2224 }
2225
2226 return status;
2227}
2228
2229/**
2230 * ixgbe_validate_mac_addr - Validate MAC address
2231 * @mac_addr: pointer to MAC address.
2232 *
2233 * Tests a MAC address to ensure it is a valid Individual Address
2234 **/
2235s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2236{
2237 s32 status = IXGBE_SUCCESS;
2238
2239 DEBUGFUNC("ixgbe_validate_mac_addr");
2240
2241 /* Make sure it is not a multicast address */
2242 if (IXGBE_IS_MULTICAST(mac_addr)) {
2243 DEBUGOUT("MAC address is multicast\n");
2244 status = IXGBE_ERR_INVALID_MAC_ADDR;
2245 /* Not a broadcast address */
2246 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2247 DEBUGOUT("MAC address is broadcast\n");
2248 status = IXGBE_ERR_INVALID_MAC_ADDR;
2249 /* Reject the zero address */
2250 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2251 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2252 DEBUGOUT("MAC address is all zeros\n");
2253 status = IXGBE_ERR_INVALID_MAC_ADDR;
2254 }
2255 return status;
2256}
2257
2258/**
2259 * ixgbe_set_rar_generic - Set Rx address register
2260 * @hw: pointer to hardware structure
2261 * @index: Receive address register to write
2262 * @addr: Address to put into receive address register
2263 * @vmdq: VMDq "set" or "pool" index
2264 * @enable_addr: set flag that address is active
2265 *
2266 * Puts an ethernet address into a receive address register.
2267 **/
2268s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2269 u32 enable_addr)
2270{
2271 u32 rar_low, rar_high;
2272 u32 rar_entries = hw->mac.num_rar_entries;
2273
2274 DEBUGFUNC("ixgbe_set_rar_generic");
2275
2276 /* Make sure we are using a valid rar index range */
2277 if (index >= rar_entries) {
2278 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2279 "RAR index %d is out of range.\n", index);
2280 return IXGBE_ERR_INVALID_ARGUMENT;
2281 }
2282
2283 /* setup VMDq pool selection before this RAR gets enabled */
2284 hw->mac.ops.set_vmdq(hw, index, vmdq);
2285
2286 /*
2287 * HW expects these in little endian so we reverse the byte
2288 * order from network order (big endian) to little endian
2289 */
2290 rar_low = ((u32)addr[0] |
2291 ((u32)addr[1] << 8) |
2292 ((u32)addr[2] << 16) |
2293 ((u32)addr[3] << 24));
2294 /*
2295 * Some parts put the VMDq setting in the extra RAH bits,
2296 * so save everything except the lower 16 bits that hold part
2297 * of the address and the address valid bit.
2298 */
2299 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2300 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2301 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2302
2303 if (enable_addr != 0)
2304 rar_high |= IXGBE_RAH_AV;
2305
2306 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2307 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2308
2309 return IXGBE_SUCCESS;
2310}
2311
2312/**
2313 * ixgbe_clear_rar_generic - Remove Rx address register
2314 * @hw: pointer to hardware structure
2315 * @index: Receive address register to write
2316 *
2317 * Clears an ethernet address from a receive address register.
2318 **/
2319s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2320{
2321 u32 rar_high;
2322 u32 rar_entries = hw->mac.num_rar_entries;
2323
2324 DEBUGFUNC("ixgbe_clear_rar_generic");
2325
2326 /* Make sure we are using a valid rar index range */
2327 if (index >= rar_entries) {
2328 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2329 "RAR index %d is out of range.\n", index);
2330 return IXGBE_ERR_INVALID_ARGUMENT;
2331 }
2332
2333 /*
2334 * Some parts put the VMDq setting in the extra RAH bits,
2335 * so save everything except the lower 16 bits that hold part
2336 * of the address and the address valid bit.
2337 */
2338 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2339 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2340
2341 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2342 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2343
2344 /* clear VMDq pool/queue selection for this RAR */
2345 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2346
2347 return IXGBE_SUCCESS;
2348}
2349
2350/**
2351 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2352 * @hw: pointer to hardware structure
2353 *
2354 * Places the MAC address in receive address register 0 and clears the rest
2355 * of the receive address registers. Clears the multicast table. Assumes
2356 * the receiver is in reset when the routine is called.
2357 **/
2358s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2359{
2360 u32 i;
2361 u32 rar_entries = hw->mac.num_rar_entries;
2362
2363 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2364
2365 /*
2366 * If the current mac address is valid, assume it is a software override
2367 * to the permanent address.
2368 * Otherwise, use the permanent address from the eeprom.
2369 */
2370 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2371 IXGBE_ERR_INVALID_MAC_ADDR) {
2372 /* Get the MAC address from the RAR0 for later reference */
2373 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2374
2375 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2376 hw->mac.addr[0], hw->mac.addr[1],
2377 hw->mac.addr[2]);
2378 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2379 hw->mac.addr[4], hw->mac.addr[5]);
2380 } else {
2381 /* Setup the receive address. */
2382 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2383 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2384 hw->mac.addr[0], hw->mac.addr[1],
2385 hw->mac.addr[2]);
2386 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2387 hw->mac.addr[4], hw->mac.addr[5]);
2388
2389 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2390
2391 /* clear VMDq pool/queue selection for RAR 0 */
2392 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2393 }
2394 hw->addr_ctrl.overflow_promisc = 0;
2395
2396 hw->addr_ctrl.rar_used_count = 1;
2397
2398 /* Zero out the other receive addresses. */
2399 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2400 for (i = 1; i < rar_entries; i++) {
2401 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2402 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2403 }
2404
2405 /* Clear the MTA */
2406 hw->addr_ctrl.mta_in_use = 0;
2407 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2408
2409 DEBUGOUT(" Clearing MTA\n");
2410 for (i = 0; i < hw->mac.mcft_size; i++)
2411 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2412
2413 ixgbe_init_uta_tables(hw);
2414
2415 return IXGBE_SUCCESS;
2416}
2417
2418/**
2419 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2420 * @hw: pointer to hardware structure
2421 * @addr: new address
2422 *
2423 * Adds it to unused receive address register or goes into promiscuous mode.
2424 **/
2425void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2426{
2427 u32 rar_entries = hw->mac.num_rar_entries;
2428 u32 rar;
2429
2430 DEBUGFUNC("ixgbe_add_uc_addr");
2431
2432 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2433 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2434
2435 /*
2436 * Place this address in the RAR if there is room,
2437 * else put the controller into promiscuous mode
2438 */
2439 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2440 rar = hw->addr_ctrl.rar_used_count;
2441 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2442 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2443 hw->addr_ctrl.rar_used_count++;
2444 } else {
2445 hw->addr_ctrl.overflow_promisc++;
2446 }
2447
2448 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2449}
2450
2451/**
2452 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2453 * @hw: pointer to hardware structure
2454 * @addr_list: the list of new addresses
2455 * @addr_count: number of addresses
2456 * @next: iterator function to walk the address list
2457 *
2458 * The given list replaces any existing list. Clears the secondary addrs from
2459 * receive address registers. Uses unused receive address registers for the
2460 * first secondary addresses, and falls back to promiscuous mode as needed.
2461 *
2462 * Drivers using secondary unicast addresses must set user_set_promisc when
2463 * manually putting the device into promiscuous mode.
2464 **/
2465s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2466 u32 addr_count, ixgbe_mc_addr_itr next)
2467{
2468 u8 *addr;
2469 u32 i;
2470 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2471 u32 uc_addr_in_use;
2472 u32 fctrl;
2473 u32 vmdq;
2474
2475 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2476
2477 /*
2478 * Clear accounting of old secondary address list,
2479 * don't count RAR[0]
2480 */
2481 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2482 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2483 hw->addr_ctrl.overflow_promisc = 0;
2484
2485 /* Zero out the other receive addresses */
2486 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2487 for (i = 0; i < uc_addr_in_use; i++) {
2488 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2489 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2490 }
2491
2492 /* Add the new addresses */
2493 for (i = 0; i < addr_count; i++) {
2494 DEBUGOUT(" Adding the secondary addresses:\n");
2495 addr = next(hw, &addr_list, &vmdq);
2496 ixgbe_add_uc_addr(hw, addr, vmdq);
2497 }
2498
2499 if (hw->addr_ctrl.overflow_promisc) {
2500 /* enable promisc if not already in overflow or set by user */
2501 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2502 DEBUGOUT(" Entering address overflow promisc mode\n");
2503 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2504 fctrl |= IXGBE_FCTRL_UPE;
2505 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2506 }
2507 } else {
2508 /* only disable if set by overflow, not by user */
2509 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2510 DEBUGOUT(" Leaving address overflow promisc mode\n");
2511 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2512 fctrl &= ~IXGBE_FCTRL_UPE;
2513 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2514 }
2515 }
2516
2517 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2518 return IXGBE_SUCCESS;
2519}
2520
2521/**
2522 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2523 * @hw: pointer to hardware structure
2524 * @mc_addr: the multicast address
2525 *
2526 * Extracts the 12 bits, from a multicast address, to determine which
2527 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2528 * incoming rx multicast addresses, to determine the bit-vector to check in
2529 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2530 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2531 * to mc_filter_type.
2532 **/
2533static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2534{
2535 u32 vector = 0;
2536
2537 DEBUGFUNC("ixgbe_mta_vector");
2538
2539 switch (hw->mac.mc_filter_type) {
2540 case 0: /* use bits [47:36] of the address */
2541 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2542 break;
2543 case 1: /* use bits [46:35] of the address */
2544 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2545 break;
2546 case 2: /* use bits [45:34] of the address */
2547 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2548 break;
2549 case 3: /* use bits [43:32] of the address */
2550 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2551 break;
2552 default: /* Invalid mc_filter_type */
2553 DEBUGOUT("MC filter type param set incorrectly\n");
2554 ASSERT(0);
2555 break;
2556 }
2557
2558 /* vector can only be 12-bits or boundary will be exceeded */
2559 vector &= 0xFFF;
2560 return vector;
2561}
2562
2563/**
2564 * ixgbe_set_mta - Set bit-vector in multicast table
2565 * @hw: pointer to hardware structure
2566 * @hash_value: Multicast address hash value
2567 *
2568 * Sets the bit-vector in the multicast table.
2569 **/
2570void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2571{
2572 u32 vector;
2573 u32 vector_bit;
2574 u32 vector_reg;
2575
2576 DEBUGFUNC("ixgbe_set_mta");
2577
2578 hw->addr_ctrl.mta_in_use++;
2579
2580 vector = ixgbe_mta_vector(hw, mc_addr);
2581 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2582
2583 /*
2584 * The MTA is a register array of 128 32-bit registers. It is treated
2585 * like an array of 4096 bits. We want to set bit
2586 * BitArray[vector_value]. So we figure out what register the bit is
2587 * in, read it, OR in the new bit, then write back the new value. The
2588 * register is determined by the upper 7 bits of the vector value and
2589 * the bit within that register are determined by the lower 5 bits of
2590 * the value.
2591 */
2592 vector_reg = (vector >> 5) & 0x7F;
2593 vector_bit = vector & 0x1F;
2594 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2595}
2596
2597/**
2598 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2599 * @hw: pointer to hardware structure
2600 * @mc_addr_list: the list of new multicast addresses
2601 * @mc_addr_count: number of addresses
2602 * @next: iterator function to walk the multicast address list
2603 * @clear: flag, when set clears the table beforehand
2604 *
2605 * When the clear flag is set, the given list replaces any existing list.
2606 * Hashes the given addresses into the multicast table.
2607 **/
2608s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2609 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2610 bool clear)
2611{
2612 u32 i;
2613 u32 vmdq;
2614
2615 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2616
2617 /*
2618 * Set the new number of MC addresses that we are being requested to
2619 * use.
2620 */
2621 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2622 hw->addr_ctrl.mta_in_use = 0;
2623
2624 /* Clear mta_shadow */
2625 if (clear) {
2626 DEBUGOUT(" Clearing MTA\n");
2627 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2628 }
2629
2630 /* Update mta_shadow */
2631 for (i = 0; i < mc_addr_count; i++) {
2632 DEBUGOUT(" Adding the multicast addresses:\n");
2633 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2634 }
2635
2636 /* Enable mta */
2637 for (i = 0; i < hw->mac.mcft_size; i++)
2638 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2639 hw->mac.mta_shadow[i]);
2640
2641 if (hw->addr_ctrl.mta_in_use > 0)
2642 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2643 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2644
2645 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2646 return IXGBE_SUCCESS;
2647}
2648
2649/**
2650 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2651 * @hw: pointer to hardware structure
2652 *
2653 * Enables multicast address in RAR and the use of the multicast hash table.
2654 **/
2655s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2656{
2657 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2658
2659 DEBUGFUNC("ixgbe_enable_mc_generic");
2660
2661 if (a->mta_in_use > 0)
2662 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2663 hw->mac.mc_filter_type);
2664
2665 return IXGBE_SUCCESS;
2666}
2667
2668/**
2669 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2670 * @hw: pointer to hardware structure
2671 *
2672 * Disables multicast address in RAR and the use of the multicast hash table.
2673 **/
2674s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2675{
2676 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2677
2678 DEBUGFUNC("ixgbe_disable_mc_generic");
2679
2680 if (a->mta_in_use > 0)
2681 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2682
2683 return IXGBE_SUCCESS;
2684}
2685
2686/**
2687 * ixgbe_fc_enable_generic - Enable flow control
2688 * @hw: pointer to hardware structure
2689 *
2690 * Enable flow control according to the current settings.
2691 **/
2692s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2693{
2694 s32 ret_val = IXGBE_SUCCESS;
2695 u32 mflcn_reg, fccfg_reg;
2696 u32 reg;
2697 u32 fcrtl, fcrth;
2698 int i;
2699
2700 DEBUGFUNC("ixgbe_fc_enable_generic");
2701
2702 /* Validate the water mark configuration */
2703 if (!hw->fc.pause_time) {
2704 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2705 goto out;
2706 }
2707
2708 /* Low water mark of zero causes XOFF floods */
2709 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2710 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2711 hw->fc.high_water[i]) {
2712 if (!hw->fc.low_water[i] ||
2713 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2714 DEBUGOUT("Invalid water mark configuration\n");
2715 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2716 goto out;
2717 }
2718 }
2719 }
2720
2721 /* Negotiate the fc mode to use */
2722 ixgbe_fc_autoneg(hw);
2723
2724 /* Disable any previous flow control settings */
2725 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2726 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2727
2728 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2729 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2730
2731 /*
2732 * The possible values of fc.current_mode are:
2733 * 0: Flow control is completely disabled
2734 * 1: Rx flow control is enabled (we can receive pause frames,
2735 * but not send pause frames).
2736 * 2: Tx flow control is enabled (we can send pause frames but
2737 * we do not support receiving pause frames).
2738 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2739 * other: Invalid.
2740 */
2741 switch (hw->fc.current_mode) {
2742 case ixgbe_fc_none:
2743 /*
2744 * Flow control is disabled by software override or autoneg.
2745 * The code below will actually disable it in the HW.
2746 */
2747 break;
2748 case ixgbe_fc_rx_pause:
2749 /*
2750 * Rx Flow control is enabled and Tx Flow control is
2751 * disabled by software override. Since there really
2752 * isn't a way to advertise that we are capable of RX
2753 * Pause ONLY, we will advertise that we support both
2754 * symmetric and asymmetric Rx PAUSE. Later, we will
2755 * disable the adapter's ability to send PAUSE frames.
2756 */
2757 mflcn_reg |= IXGBE_MFLCN_RFCE;
2758 break;
2759 case ixgbe_fc_tx_pause:
2760 /*
2761 * Tx Flow control is enabled, and Rx Flow control is
2762 * disabled by software override.
2763 */
2764 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2765 break;
2766 case ixgbe_fc_full:
2767 /* Flow control (both Rx and Tx) is enabled by SW override. */
2768 mflcn_reg |= IXGBE_MFLCN_RFCE;
2769 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2770 break;
2771 default:
2772 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2773 "Flow control param set incorrectly\n");
2774 ret_val = IXGBE_ERR_CONFIG;
2775 goto out;
2776 break;
2777 }
2778
2779 /* Set 802.3x based flow control settings. */
2780 mflcn_reg |= IXGBE_MFLCN_DPF;
2781 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2782 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2783
2784
2785 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2786 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2787 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2788 hw->fc.high_water[i]) {
2789 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2790 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2791 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2792 } else {
2793 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2794 /*
2795 * In order to prevent Tx hangs when the internal Tx
2796 * switch is enabled we must set the high water mark
2797 * to the maximum FCRTH value. This allows the Tx
2798 * switch to function even under heavy Rx workloads.
2799 */
2800 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2801 }
2802
2803 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2804 }
2805
2806 /* Configure pause time (2 TCs per register) */
2807 reg = hw->fc.pause_time * 0x00010001;
2808 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2809 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2810
2811 /* Configure flow control refresh threshold value */
2812 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2813
2814out:
2815 return ret_val;
2816}
2817
2818/**
2819 * ixgbe_negotiate_fc - Negotiate flow control
2820 * @hw: pointer to hardware structure
2821 * @adv_reg: flow control advertised settings
2822 * @lp_reg: link partner's flow control settings
2823 * @adv_sym: symmetric pause bit in advertisement
2824 * @adv_asm: asymmetric pause bit in advertisement
2825 * @lp_sym: symmetric pause bit in link partner advertisement
2826 * @lp_asm: asymmetric pause bit in link partner advertisement
2827 *
2828 * Find the intersection between advertised settings and link partner's
2829 * advertised settings
2830 **/
2831static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2832 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2833{
2834 if ((!(adv_reg)) || (!(lp_reg))) {
2835 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2836 "Local or link partner's advertised flow control "
2837 "settings are NULL. Local: %x, link partner: %x\n",
2838 adv_reg, lp_reg);
2839 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2840 }
2841
2842 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2843 /*
2844 * Now we need to check if the user selected Rx ONLY
2845 * of pause frames. In this case, we had to advertise
2846 * FULL flow control because we could not advertise RX
2847 * ONLY. Hence, we must now check to see if we need to
2848 * turn OFF the TRANSMISSION of PAUSE frames.
2849 */
2850 if (hw->fc.requested_mode == ixgbe_fc_full) {
2851 hw->fc.current_mode = ixgbe_fc_full;
2852 DEBUGOUT("Flow Control = FULL.\n");
2853 } else {
2854 hw->fc.current_mode = ixgbe_fc_rx_pause;
2855 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2856 }
2857 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2858 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2859 hw->fc.current_mode = ixgbe_fc_tx_pause;
2860 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2861 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2862 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2863 hw->fc.current_mode = ixgbe_fc_rx_pause;
2864 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2865 } else {
2866 hw->fc.current_mode = ixgbe_fc_none;
2867 DEBUGOUT("Flow Control = NONE.\n");
2868 }
2869 return IXGBE_SUCCESS;
2870}
2871
2872/**
2873 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2874 * @hw: pointer to hardware structure
2875 *
2876 * Enable flow control according on 1 gig fiber.
2877 **/
2878static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2879{
2880 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2881 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2882
2883 /*
2884 * On multispeed fiber at 1g, bail out if
2885 * - link is up but AN did not complete, or if
2886 * - link is up and AN completed but timed out
2887 */
2888
2889 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2890 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2891 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2892 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2893 "Auto-Negotiation did not complete or timed out");
2894 goto out;
2895 }
2896
2897 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2898 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2899
2900 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2901 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2902 IXGBE_PCS1GANA_ASM_PAUSE,
2903 IXGBE_PCS1GANA_SYM_PAUSE,
2904 IXGBE_PCS1GANA_ASM_PAUSE);
2905
2906out:
2907 return ret_val;
2908}
2909
2910/**
2911 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2912 * @hw: pointer to hardware structure
2913 *
2914 * Enable flow control according to IEEE clause 37.
2915 **/
2916static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2917{
2918 u32 links2, anlp1_reg, autoc_reg, links;
2919 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2920
2921 /*
2922 * On backplane, bail out if
2923 * - backplane autoneg was not completed, or if
2924 * - we are 82599 and link partner is not AN enabled
2925 */
2926 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2927 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2928 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2929 "Auto-Negotiation did not complete");
2930 goto out;
2931 }
2932
2933 if (hw->mac.type == ixgbe_mac_82599EB) {
2934 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2935 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2936 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2937 "Link partner is not AN enabled");
2938 goto out;
2939 }
2940 }
2941 /*
2942 * Read the 10g AN autoc and LP ability registers and resolve
2943 * local flow control settings accordingly
2944 */
2945 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2946 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2947
2948 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2949 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2950 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2951
2952out:
2953 return ret_val;
2954}
2955
2956/**
2957 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2958 * @hw: pointer to hardware structure
2959 *
2960 * Enable flow control according to IEEE clause 37.
2961 **/
2962static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2963{
2964 u16 technology_ability_reg = 0;
2965 u16 lp_technology_ability_reg = 0;
2966
2967 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2968 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2969 &technology_ability_reg);
2970 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2971 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2972 &lp_technology_ability_reg);
2973
2974 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2975 (u32)lp_technology_ability_reg,
2976 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2977 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2978}
2979
2980/**
2981 * ixgbe_fc_autoneg - Configure flow control
2982 * @hw: pointer to hardware structure
2983 *
2984 * Compares our advertised flow control capabilities to those advertised by
2985 * our link partner, and determines the proper flow control mode to use.
2986 **/
2987void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2988{
2989 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2990 ixgbe_link_speed speed;
2991 bool link_up;
2992
2993 DEBUGFUNC("ixgbe_fc_autoneg");
2994
2995 /*
2996 * AN should have completed when the cable was plugged in.
2997 * Look for reasons to bail out. Bail out if:
2998 * - FC autoneg is disabled, or if
2999 * - link is not up.
3000 */
3001 if (hw->fc.disable_fc_autoneg) {
3002 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3003 "Flow control autoneg is disabled");
3004 goto out;
3005 }
3006
3007 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3008 if (!link_up) {
3009 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3010 goto out;
3011 }
3012
3013 switch (hw->phy.media_type) {
3014 /* Autoneg flow control on fiber adapters */
3015 case ixgbe_media_type_fiber_fixed:
3016 case ixgbe_media_type_fiber:
3017 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3018 ret_val = ixgbe_fc_autoneg_fiber(hw);
3019 break;
3020
3021 /* Autoneg flow control on backplane adapters */
3022 case ixgbe_media_type_backplane:
3023 ret_val = ixgbe_fc_autoneg_backplane(hw);
3024 break;
3025
3026 /* Autoneg flow control on copper adapters */
3027 case ixgbe_media_type_copper:
3028 if (ixgbe_device_supports_autoneg_fc(hw))
3029 ret_val = ixgbe_fc_autoneg_copper(hw);
3030 break;
3031
3032 default:
3033 break;
3034 }
3035
3036out:
3037 if (ret_val == IXGBE_SUCCESS) {
3038 hw->fc.fc_was_autonegged = TRUE;
3039 } else {
3040 hw->fc.fc_was_autonegged = FALSE;
3041 hw->fc.current_mode = hw->fc.requested_mode;
3042 }
3043}
3044
3045/*
3046 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3047 * @hw: pointer to hardware structure
3048 *
3049 * System-wide timeout range is encoded in PCIe Device Control2 register.
3050 *
3051 * Add 10% to specified maximum and return the number of times to poll for
3052 * completion timeout, in units of 100 microsec. Never return less than
3053 * 800 = 80 millisec.
3054 */
3055static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3056{
3057 s16 devctl2;
3058 u32 pollcnt;
3059
3060 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3061 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3062
3063 switch (devctl2) {
3064 case IXGBE_PCIDEVCTRL2_65_130ms:
3065 pollcnt = 1300; /* 130 millisec */
3066 break;
3067 case IXGBE_PCIDEVCTRL2_260_520ms:
3068 pollcnt = 5200; /* 520 millisec */
3069 break;
3070 case IXGBE_PCIDEVCTRL2_1_2s:
3071 pollcnt = 20000; /* 2 sec */
3072 break;
3073 case IXGBE_PCIDEVCTRL2_4_8s:
3074 pollcnt = 80000; /* 8 sec */
3075 break;
3076 case IXGBE_PCIDEVCTRL2_17_34s:
3077 pollcnt = 34000; /* 34 sec */
3078 break;
3079 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3080 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3081 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3082 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3083 default:
3084 pollcnt = 800; /* 80 millisec minimum */
3085 break;
3086 }
3087
3088 /* add 10% to spec maximum */
3089 return (pollcnt * 11) / 10;
3090}
3091
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Poll for master request bit to clear, 100 us per iteration */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll count comes from the configured PCIe
	 * completion timeout (plus 10%); each iteration is 100 us.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3152
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW-owned bits sit 5 positions above SW */
	u32 timeout = 200;	/* 200 tries * 5 ms delay = ~1 s worst case */
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Resource is free: claim it for SW, then drop the
			 * arbitration semaphore. */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
3199
3200/**
3201 * ixgbe_release_swfw_sync - Release SWFW semaphore
3202 * @hw: pointer to hardware structure
3203 * @mask: Mask to specify which semaphore to release
3204 *
3205 * Releases the SWFW semaphore through the GSSR register for the specified
3206 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3207 **/
3208void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3209{
3210 u32 gssr;
3211 u32 swmask = mask;
3212
3213 DEBUGFUNC("ixgbe_release_swfw_sync");
3214
3215 ixgbe_get_eeprom_semaphore(hw);
3216
3217 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3218 gssr &= ~swmask;
3219 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3220
3221 ixgbe_release_eeprom_semaphore(hw);
3222}
3223
3224/**
3225 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3226 * @hw: pointer to hardware structure
3227 *
3228 * Stops the receive data path and waits for the HW to internally empty
3229 * the Rx security block
3230 **/
3231s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3232{
3233#define IXGBE_MAX_SECRX_POLL 40
3234
3235 int i;
3236 int secrxreg;
3237
3238 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3239
3240
3241 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3242 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3243 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3244 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3245 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3246 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3247 break;
3248 else
3249 /* Use interrupt-safe sleep just in case */
3250 usec_delay(1000);
3251 }
3252
3253 /* For informational purposes only */
3254 if (i >= IXGBE_MAX_SECRX_POLL)
3255 DEBUGOUT("Rx unit being enabled before security "
3256 "path fully disabled. Continuing with init.\n");
3257
3258 return IXGBE_SUCCESS;
3259}
3260
3261/**
3262 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3263 * @hw: pointer to hardware structure
3264 *
3265 * Enables the receive data path.
3266 **/
3267s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3268{
3269 int secrxreg;
3270
3271 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3272
3273 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3274 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3275 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3276 IXGBE_WRITE_FLUSH(hw);
3277
3278 return IXGBE_SUCCESS;
3279}
3280
/**
 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit by writing the caller-supplied value directly
 * to RXCTRL; no read-modify-write is performed.
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_generic");

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return IXGBE_SUCCESS;
}
3296
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * If the link is currently down, link is forced up (FLU) first, because
 * the blink mode configured below operates off link state.
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on.
		 */
		bool got_lock = FALSE;
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}
			got_lock = TRUE;
		}

		/* Force link up and restart auto-negotiation */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
		/* Give the forced link time to take effect */
		msec_delay(10);
	}

	/* Put the requested LED into blink mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3353
/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 *
 * Clears forced link-up (FLU) and returns the LED to normal
 * link/activity indication.
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");
	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
	 * LESM is on.
	 */
	if ((hw->mac.type == ixgbe_mac_82599EB) &&
	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto out;
		}
		got_lock = TRUE;
	}


	/* Drop forced link-up and restart auto-negotiation */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* On 82599, reset the pipeline after updating AUTOC */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Restore the LED to link/activity indication */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3401
3402/**
3403 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3404 * @hw: pointer to hardware structure
3405 * @san_mac_offset: SAN MAC address offset
3406 *
3407 * This function will read the EEPROM location for the SAN MAC address
3408 * pointer, and returns the value at that location. This is used in both
3409 * get and set mac_addr routines.
3410 **/
3411static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3412 u16 *san_mac_offset)
3413{
3414 s32 ret_val;
3415
3416 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3417
3418 /*
3419 * First read the EEPROM pointer to see if the MAC addresses are
3420 * available.
3421 */
3422 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3423 san_mac_offset);
3424 if (ret_val) {
3425 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3426 "eeprom at offset %d failed",
3427 IXGBE_SAN_MAC_ADDR_PTR);
3428 }
3429
3430 return ret_val;
3431}
3432
3433/**
3434 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3435 * @hw: pointer to hardware structure
3436 * @san_mac_addr: SAN MAC address
3437 *
3438 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3439 * per-port, so set_lan_id() must be called before reading the addresses.
3440 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3441 * upon for non-SFP connections, so we must call it here.
3442 **/
3443s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3444{
3445 u16 san_mac_data, san_mac_offset;
3446 u8 i;
3447 s32 ret_val;
3448
3449 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3450
3451 /*
3452 * First read the EEPROM pointer to see if the MAC addresses are
3453 * available. If they're not, no point in calling set_lan_id() here.
3454 */
3455 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3456 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3457 goto san_mac_addr_out;
3458
3459 /* make sure we know which port we need to program */
3460 hw->mac.ops.set_lan_id(hw);
3461 /* apply the port offset to the address offset */
3462 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3463 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3464 for (i = 0; i < 3; i++) {
3465 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3466 &san_mac_data);
3467 if (ret_val) {
3468 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3469 "eeprom read at offset %d failed",
3470 san_mac_offset);
3471 goto san_mac_addr_out;
3472 }
3473 san_mac_addr[i * 2] = (u8)(san_mac_data);
3474 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3475 san_mac_offset++;
3476 }
3477 return IXGBE_SUCCESS;
3478
3479san_mac_addr_out:
3480 /*
3481 * No addresses available in this EEPROM. It's not an
3482 * error though, so just wipe the local address and return.
3483 */
3484 for (i = 0; i < 6; i++)
3485 san_mac_addr[i] = 0xFF;
3486 return IXGBE_SUCCESS;
3487}
3488
3489/**
3490 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3491 * @hw: pointer to hardware structure
3492 * @san_mac_addr: SAN MAC address
3493 *
3494 * Write a SAN MAC address to the EEPROM.
3495 **/
3496s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3497{
3498 s32 ret_val;
3499 u16 san_mac_data, san_mac_offset;
3500 u8 i;
3501
3502 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3503
3504 /* Look for SAN mac address pointer. If not defined, return */
3505 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3506 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3507 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3508
3509 /* Make sure we know which port we need to write */
3510 hw->mac.ops.set_lan_id(hw);
3511 /* Apply the port offset to the address offset */
3512 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3513 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3514
3515 for (i = 0; i < 3; i++) {
3516 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3517 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3518 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3519 san_mac_offset++;
3520 }
3521
3522 return IXGBE_SUCCESS;
3523}
3524
3525/**
3526 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3527 * @hw: pointer to hardware structure
3528 *
3529 * Read PCIe configuration space, and get the MSI-X vector count from
3530 * the capabilities table.
3531 **/
3532u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3533{
3534 u16 msix_count = 1;
3535 u16 max_msix_count;
3536 u16 pcie_offset;
3537
3538 switch (hw->mac.type) {
3539 case ixgbe_mac_82598EB:
3540 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3541 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3542 break;
3543 case ixgbe_mac_82599EB:
3544 case ixgbe_mac_X540:
3545 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3546 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3547 break;
3548 default:
3549 return msix_count;
3550 }
3551
3552 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3553 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3554 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3555
3556 /* MSI-X count is zero-based in HW */
3557 msix_count++;
3558
3559 if (msix_count > max_msix_count)
3560 msix_count = max_msix_count;
3561
3562 return msix_count;
3563}
3564
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is aleady in; adds to the pool list.
 * Returns the RAR index used on success, or
 * IXGBE_ERR_INVALID_MAC_ADDR when no RAR slot is available.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
		 | (addr[2] << 16)
		 | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* An entry with the Address Valid bit clear is free */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* High halves match; confirm with the low half */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;	/* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3633
/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar, or IXGBE_CLEAR_VMDQ_ALL
 *
 * Clears the requested pool bit(s) for the RAR.  If that leaves no pool
 * referencing the RAR, the RAR itself is cleared (except RAR 0).
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* Nothing set: nothing to clear */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	/*
	 * The locals mpsar_lo/mpsar_hi are kept in sync with what is
	 * written so the "last pool" check below sees the new state.
	 */
	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
3683
3684/**
3685 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3686 * @hw: pointer to hardware struct
3687 * @rar: receive address register index to associate with a VMDq index
3688 * @vmdq: VMDq pool index
3689 **/
3690s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3691{
3692 u32 mpsar;
3693 u32 rar_entries = hw->mac.num_rar_entries;
3694
3695 DEBUGFUNC("ixgbe_set_vmdq_generic");
3696
3697 /* Make sure we are using a valid rar index range */
3698 if (rar >= rar_entries) {
3699 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3700 "RAR index %d is out of range.\n", rar);
3701 return IXGBE_ERR_INVALID_ARGUMENT;
3702 }
3703
3704 if (vmdq < 32) {
3705 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3706 mpsar |= 1 << vmdq;
3707 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3708 } else {
3709 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3710 mpsar |= 1 << (vmdq - 32);
3711 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3712 }
3713 return IXGBE_SUCCESS;
3714}
3715
3716/**
3717 * This function should only be involved in the IOV mode.
3718 * In IOV mode, Default pool is next pool after the number of
3719 * VFs advertized and not 0.
3720 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3721 *
3722 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3723 * @hw: pointer to hardware struct
3724 * @vmdq: VMDq pool index
3725 **/
3726s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3727{
3728 u32 rar = hw->mac.san_mac_rar_index;
3729
3730 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3731
3732 if (vmdq < 32) {
3733 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3734 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3735 } else {
3736 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3737 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3738 }
3739
3740 return IXGBE_SUCCESS;
3741}
3742
/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 *
 * Zeroes all 128 UTA registers, clearing any unicast hash filter state.
 **/
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
	int i;

	DEBUGFUNC("ixgbe_init_uta_tables_generic");
	DEBUGOUT(" Clearing UTA\n");

	/* The UTA is an array of 128 32-bit registers */
	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

	return IXGBE_SUCCESS;
}
3759
3760/**
3761 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3762 * @hw: pointer to hardware structure
3763 * @vlan: VLAN id to write to VLAN filter
3764 *
3765 * return the VLVF index where this VLAN id should be placed
3766 *
3767 **/
3768s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3769{
3770 u32 bits = 0;
3771 u32 first_empty_slot = 0;
3772 s32 regindex;
3773
3774 /* short cut the special case */
3775 if (vlan == 0)
3776 return 0;
3777
3778 /*
3779 * Search for the vlan id in the VLVF entries. Save off the first empty
3780 * slot found along the way
3781 */
3782 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3783 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3784 if (!bits && !(first_empty_slot))
3785 first_empty_slot = regindex;
3786 else if ((bits & 0x0FFF) == vlan)
3787 break;
3788 }
3789
3790 /*
3791 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3792 * in the VLVF. Else use the first empty VLVF register for this
3793 * vlan id.
3794 */
3795 if (regindex >= IXGBE_VLVF_ENTRIES) {
3796 if (first_empty_slot)
3797 regindex = first_empty_slot;
3798 else {
3799 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3800 "No space in VLVF.\n");
3801 regindex = IXGBE_ERR_NO_SPACE;
3802 }
3803 }
3804
3805 return regindex;
3806}
3807
/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter (0-4095)
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 * Returns IXGBE_ERR_PARAM for an out-of-range vlan, or the error from
 * the VLVF update.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 targetbit;
	s32 ret_val = IXGBE_SUCCESS;
	bool vfta_changed = FALSE;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* Only flag a write if the bit actually needs to change */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = TRUE;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = TRUE;
		}
	}

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 * (may clear vfta_changed if other pools still use this VLAN)
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
					 &vfta_changed);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return IXGBE_SUCCESS;
}
3874
/**
 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter (0-4095)
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 *                should be changed; may be cleared by this function,
 *                may be NULL
 *
 * Turn on/off specified bit in VLVF table.  No-op unless VT Mode is
 * enabled in VT_CTL.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		/* May return IXGBE_ERR_NO_SPACE (negative) */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		/* Each VLVF entry owns a pair of VLVFB registers:
		 * index*2 holds pools 0-31, index*2+1 holds pools 32-63. */
		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit; afterwards fold in the other
			 * half of the pool bitmap so "bits" reflects all 64
			 * pools for the last-user check below */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3982
3983/**
3984 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3985 * @hw: pointer to hardware structure
3986 *
3987 * Clears the VLAN filer table, and the VMDq index associated with the filter
3988 **/
3989s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3990{
3991 u32 offset;
3992
3993 DEBUGFUNC("ixgbe_clear_vfta_generic");
3994
3995 for (offset = 0; offset < hw->mac.vft_size; offset++)
3996 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3997
3998 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3999 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4000 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4001 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4002 }
4003
4004 return IXGBE_SUCCESS;
4005}
4006
4007/**
4008 * ixgbe_check_mac_link_generic - Determine link and speed status
4009 * @hw: pointer to hardware structure
4010 * @speed: pointer to link speed
4011 * @link_up: TRUE when link is up
4012 * @link_up_wait_to_complete: bool used to wait for link up or not
4013 *
4014 * Reads the links register to determine if link is up and the current speed
4015 **/
4016s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4017 bool *link_up, bool link_up_wait_to_complete)
4018{
4019 u32 links_reg, links_orig;
4020 u32 i;
4021
4022 DEBUGFUNC("ixgbe_check_mac_link_generic");
4023
4024 /* clear the old state */
4025 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4026
4027 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4028
4029 if (links_orig != links_reg) {
4030 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4031 links_orig, links_reg);
4032 }
4033
4034 if (link_up_wait_to_complete) {
4035 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
4036 if (links_reg & IXGBE_LINKS_UP) {
4037 *link_up = TRUE;
4038 break;
4039 } else {
4040 *link_up = FALSE;
4041 }
4042 msec_delay(100);
4043 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4044 }
4045 } else {
4046 if (links_reg & IXGBE_LINKS_UP)
4047 *link_up = TRUE;
4048 else
4049 *link_up = FALSE;
4050 }
4051
4052 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4053 IXGBE_LINKS_SPEED_10G_82599)
4054 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4055 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4056 IXGBE_LINKS_SPEED_1G_82599)
4057 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4058 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4059 IXGBE_LINKS_SPEED_100_82599)
4060 *speed = IXGBE_LINK_SPEED_100_FULL;
4061 else
4062 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4063
4064 return IXGBE_SUCCESS;
4065}
4066
/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 * Always returns IXGBE_SUCCESS; on any failure the prefixes are left at
 * 0xFFFF.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* 0 or 0xFFFF means no alternative block; not an error */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	/* NOTE(review): a failed WWNN read is reported inline and does NOT
	 * take the wwn_prefix_err path, so the WWPN read below is still
	 * attempted.  This looks like intentional best-effort behavior, but
	 * the asymmetry with the WWPN handling is worth confirming. */
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
4124
4125/**
4126 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4127 * @hw: pointer to hardware structure
4128 * @bs: the fcoe boot status
4129 *
4130 * This function will read the FCOE boot status from the iSCSI FCOE block
4131 **/
4132s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4133{
4134 u16 offset, caps, flags;
4135 s32 status;
4136
4137 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4138
4139 /* clear output first */
4140 *bs = ixgbe_fcoe_bootstatus_unavailable;
4141
4142 /* check if FCOE IBA block is present */
4143 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4144 status = hw->eeprom.ops.read(hw, offset, &caps);
4145 if (status != IXGBE_SUCCESS)
4146 goto out;
4147
4148 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4149 goto out;
4150
4151 /* check if iSCSI FCOE block is populated */
4152 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4153 if (status != IXGBE_SUCCESS)
4154 goto out;
4155
4156 if ((offset == 0) || (offset == 0xFFFF))
4157 goto out;
4158
4159 /* read fcoe flags in iSCSI FCOE block */
4160 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4161 status = hw->eeprom.ops.read(hw, offset, &flags);
4162 if (status != IXGBE_SUCCESS)
4163 goto out;
4164
4165 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4166 *bs = ixgbe_fcoe_bootstatus_enabled;
4167 else
4168 *bs = ixgbe_fcoe_bootstatus_disabled;
4169
4170out:
4171 return status;
4172}
4173
4174/**
4175 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4176 * @hw: pointer to hardware structure
4177 * @enable: enable or disable switch for anti-spoofing
4178 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
4179 *
4180 **/
4181void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
4182{
4183 int j;
4184 int pf_target_reg = pf >> 3;
4185 int pf_target_shift = pf % 8;
4186 u32 pfvfspoof = 0;
4187
4188 if (hw->mac.type == ixgbe_mac_82598EB)
4189 return;
4190
4191 if (enable)
4192 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
4193
4194 /*
4195 * PFVFSPOOF register array is size 8 with 8 bits assigned to
4196 * MAC anti-spoof enables in each register array element.
4197 */
4198 for (j = 0; j < pf_target_reg; j++)
4199 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4200
4201 /*
4202 * The PF should be allowed to spoof so that it can support
4203 * emulation mode NICs. Do not set the bits assigned to the PF
4204 */
4205 pfvfspoof &= (1 << pf_target_shift) - 1;
4206 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4207
4208 /*
4209 * Remaining pools belong to the PF so they do not need to have
4210 * anti-spoofing enabled.
4211 */
4212 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
4213 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
4214}
4215
4216/**
4217 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4218 * @hw: pointer to hardware structure
4219 * @enable: enable or disable switch for VLAN anti-spoofing
4220 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4221 *
4222 **/
4223void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4224{
4225 int vf_target_reg = vf >> 3;
4226 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4227 u32 pfvfspoof;
4228
4229 if (hw->mac.type == ixgbe_mac_82598EB)
4230 return;
4231
4232 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4233 if (enable)
4234 pfvfspoof |= (1 << vf_target_shift);
4235 else
4236 pfvfspoof &= ~(1 << vf_target_shift);
4237 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4238}
4239
4240/**
4241 * ixgbe_get_device_caps_generic - Get additional device capabilities
4242 * @hw: pointer to hardware structure
4243 * @device_caps: the EEPROM word with the extra device capabilities
4244 *
4245 * This function will read the EEPROM location for the device capabilities,
4246 * and return the word through device_caps.
4247 **/
4248s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4249{
4250 DEBUGFUNC("ixgbe_get_device_caps_generic");
4251
4252 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4253
4254 return IXGBE_SUCCESS;
4255}
4256
4257/**
4258 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4259 * @hw: pointer to hardware structure
4260 *
4261 **/
4262void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4263{
4264 u32 regval;
4265 u32 i;
4266
4267 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4268
4269 /* Enable relaxed ordering */
4270 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4271 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4272 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4273 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4274 }
4275
4276 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4277 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4278 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4279 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4280 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4281 }
4282
4283}
4284
4285/**
4286 * ixgbe_calculate_checksum - Calculate checksum for buffer
4287 * @buffer: pointer to EEPROM
4288 * @length: size of EEPROM to calculate a checksum for
4289 * Calculates the checksum for some buffer on a specified length. The
4290 * checksum calculated is returned.
4291 **/
4292u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4293{
4294 u32 i;
4295 u8 sum = 0;
4296
4297 DEBUGFUNC("ixgbe_calculate_checksum");
4298
4299 if (!buffer)
4300 return 0;
4301
4302 for (i = 0; i < length; i++)
4303 sum += buffer[i];
4304
4305 return (u8) (0 - sum);
4306}
4307
4308/**
4309 * ixgbe_host_interface_command - Issue command to manageability block
4310 * @hw: pointer to the HW structure
4311 * @buffer: contains the command to write and where the return status will
4312 * be placed
4313 * @length: length of buffer, must be multiple of 4 bytes
4314 *
4315 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4316 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
4317 **/
4318s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4319 u32 length)
4320{
4321 u32 hicr, i, bi;
4322 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4323 u8 buf_len, dword_len;
4324
4325 s32 ret_val = IXGBE_SUCCESS;
4326
4327 DEBUGFUNC("ixgbe_host_interface_command");
4328
4329 if (length == 0 || length & 0x3 ||
4330 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4331 DEBUGOUT("Buffer length failure.\n");
4332 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4333 goto out;
4334 }
4335
4336 /* Check that the host interface is enabled. */
4337 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4338 if ((hicr & IXGBE_HICR_EN) == 0) {
4339 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4340 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4341 goto out;
4342 }
4343
4344 /* Calculate length in DWORDs */
4345 dword_len = length >> 2;
4346
4347 /*
4348 * The device driver writes the relevant command block
4349 * into the ram area.
4350 */
4351 for (i = 0; i < dword_len; i++)
4352 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4353 i, IXGBE_CPU_TO_LE32(buffer[i]));
4354
4355 /* Setting this bit tells the ARC that a new command is pending. */
4356 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4357
4358 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
4359 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4360 if (!(hicr & IXGBE_HICR_C))
4361 break;
4362 msec_delay(1);
4363 }
4364
4365 /* Check command successful completion. */
4366 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
4367 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
4368 DEBUGOUT("Command has failed with no status valid.\n");
4369 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4370 goto out;
4371 }
4372
4373 /* Calculate length in DWORDs */
4374 dword_len = hdr_size >> 2;
4375
4376 /* first pull in the header so we know the buffer length */
4377 for (bi = 0; bi < dword_len; bi++) {
4378 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4379 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4380 }
4381
4382 /* If there is any thing in data position pull it in */
4383 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4384 if (buf_len == 0)
4385 goto out;
4386
4387 if (length < (buf_len + hdr_size)) {
4388 DEBUGOUT("Buffer not large enough for reply message.\n");
4389 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4390 goto out;
4391 }
4392
4393 /* Calculate length in DWORDs, add 3 for odd lengths */
4394 dword_len = (buf_len + 3) >> 2;
4395
4396 /* Pull in the rest of the buffer (bi is where we left off)*/
4397 for (; bi <= dword_len; bi++) {
4398 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4399 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4400 }
4401
4402out:
4403 return ret_val;
4404}
4405
4406/**
4407 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4408 * @hw: pointer to the HW structure
4409 * @maj: driver version major number
4410 * @minr: driver version minor number
4411 * @build: driver version build number
4412 * @sub: driver version sub build number
4413 *
4414 * Sends driver version number to firmware through the manageability
4415 * block. On success return IXGBE_SUCCESS
4416 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4417 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4418 **/
4419s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
4420 u8 build, u8 sub)
4421{
4422 struct ixgbe_hic_drv_info fw_cmd;
4423 int i;
4424 s32 ret_val = IXGBE_SUCCESS;
4425
4426 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4427
4428 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4429 != IXGBE_SUCCESS) {
4430 ret_val = IXGBE_ERR_SWFW_SYNC;
4431 goto out;
4432 }
4433
4434 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4435 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4436 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4437 fw_cmd.port_num = (u8)hw->bus.func;
4438 fw_cmd.ver_maj = maj;
4439 fw_cmd.ver_min = minr;
4440 fw_cmd.ver_build = build;
4441 fw_cmd.ver_sub = sub;
4442 fw_cmd.hdr.checksum = 0;
4443 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4444 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4445 fw_cmd.pad = 0;
4446 fw_cmd.pad2 = 0;
4447
4448 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4449 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4450 sizeof(fw_cmd));
4451 if (ret_val != IXGBE_SUCCESS)
4452 continue;
4453
4454 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4455 FW_CEM_RESP_STATUS_SUCCESS)
4456 ret_val = IXGBE_SUCCESS;
4457 else
4458 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4459
4460 break;
4461 }
4462
4463 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4464out:
4465 return ret_val;
4466}
4467
4468/**
4469 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4470 * @hw: pointer to hardware structure
4471 * @num_pb: number of packet buffers to allocate
4472 * @headroom: reserve n KB of headroom
4473 * @strategy: packet buffer allocation strategy
4474 **/
4475void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4476 int strategy)
4477{
4478 u32 pbsize = hw->mac.rx_pb_size;
4479 int i = 0;
4480 u32 rxpktsize, txpktsize, txpbthresh;
4481
4482 /* Reserve headroom */
4483 pbsize -= headroom;
4484
4485 if (!num_pb)
4486 num_pb = 1;
4487
4488 /* Divide remaining packet buffer space amongst the number of packet
4489 * buffers requested using supplied strategy.
4490 */
4491 switch (strategy) {
4492 case PBA_STRATEGY_WEIGHTED:
4493 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4494 * buffer with 5/8 of the packet buffer space.
4495 */
4496 rxpktsize = (pbsize * 5) / (num_pb * 4);
4497 pbsize -= rxpktsize * (num_pb / 2);
4498 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4499 for (; i < (num_pb / 2); i++)
4500 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4501 /* Fall through to configure remaining packet buffers */
4502 case PBA_STRATEGY_EQUAL:
4503 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4504 for (; i < num_pb; i++)
4505 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4506 break;
4507 default:
4508 break;
4509 }
4510
4511 /* Only support an equally distributed Tx packet buffer strategy. */
4512 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4513 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4514 for (i = 0; i < num_pb; i++) {
4515 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4516 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4517 }
4518
4519 /* Clear unused TCs, if any, to zero buffer size*/
4520 for (; i < IXGBE_MAX_PB; i++) {
4521 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4522 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4523 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4524 }
4525}
4526
4527/**
4528 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4529 * @hw: pointer to the hardware structure
4530 *
4531 * The 82599 and x540 MACs can experience issues if TX work is still pending
4532 * when a reset occurs. This function prevents this by flushing the PCIe
4533 * buffers on the system.
4534 **/
4535void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4536{
4537 u32 gcr_ext, hlreg0;
4538
4539 /*
4540 * If double reset is not requested then all transactions should
4541 * already be clear and as such there is no work to do
4542 */
4543 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4544 return;
4545
4546 /*
4547 * Set loopback enable to prevent any transmits from being sent
4548 * should the link come up. This assumes that the RXCTRL.RXEN bit
4549 * has already been cleared.
4550 */
4551 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4552 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4553
4554 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4555 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4556 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4557 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4558
4559 /* Flush all writes and allow 20usec for all transactions to clear */
4560 IXGBE_WRITE_FLUSH(hw);
4561 usec_delay(20);
4562
4563 /* restore previous register values */
4564 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4565 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4566}
4567
4568
4569/**
4570 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4571 * @hw: pointer to hardware structure
4572 * @map: pointer to u8 arr for returning map
4573 *
4574 * Read the rtrup2tc HW register and resolve its content into map
4575 **/
4576void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4577{
4578 u32 reg, i;
4579
4580 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4581 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4582 map[i] = IXGBE_RTRUP2TC_UP_MASK &
4583 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4584 return;
4585}
4586