Created: June 2, 2014 05:26
Diff of the e1000e driver between Linux v3.14 and VyOS (Linux 3.8)
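For context, a minimal sketch of how a diff like this can be regenerated. The directory layout matches the diff headers below (the VyOS-side driver copied into ./e1000e, the mainline driver under drivers/net/ethernet/intel/e1000e); the clone URL and output filename are illustrative assumptions, not part of the original gist.

# Fetch mainline v3.14 sources (assumed stable-tree mirror; any v3.14 checkout works).
git clone --branch v3.14 --depth 1 \
    https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux

# With the e1000e driver from the VyOS (3.8-based) kernel copied into ./e1000e,
# compare it recursively against the mainline driver in unified format.
diff -ru e1000e linux/drivers/net/ethernet/intel/e1000e > e1000e-vyos-vs-v3.14.diff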
diff -ru e1000e /path/to/linux/drivers/net/ethernet/intel/e1000e | |
diff -ru e1000e/80003es2lan.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/80003es2lan.c | |
--- e1000e/80003es2lan.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/80003es2lan.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -26,82 +26,20 @@ | |
*******************************************************************************/ | |
-/* | |
- * 80003ES2LAN Gigabit Ethernet Controller (Copper) | |
+/* 80003ES2LAN Gigabit Ethernet Controller (Copper) | |
* 80003ES2LAN Gigabit Ethernet Controller (Serdes) | |
*/ | |
#include "e1000.h" | |
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 | |
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 | |
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 | |
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F | |
- | |
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 | |
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 | |
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 | |
- | |
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 | |
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 | |
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 | |
- | |
-#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C | |
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 | |
- | |
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ | |
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 | |
- | |
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 | |
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 | |
- | |
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ | |
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ | |
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 | |
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ | |
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ | |
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ | |
- | |
-/* PHY Specific Control Register 2 (Page 0, Register 26) */ | |
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 | |
- /* 1=Reverse Auto-Negotiation */ | |
- | |
-/* MAC Specific Control Register (Page 2, Register 21) */ | |
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ | |
-#define GG82563_MSCR_TX_CLK_MASK 0x0007 | |
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 | |
-#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 | |
-#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 | |
- | |
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ | |
- | |
-/* DSP Distance Register (Page 5, Register 26) */ | |
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M | |
- 1 = 50-80M | |
- 2 = 80-110M | |
- 3 = 110-140M | |
- 4 = >140M */ | |
- | |
-/* Kumeran Mode Control Register (Page 193, Register 16) */ | |
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 | |
- | |
-/* Max number of times Kumeran read/write should be validated */ | |
-#define GG82563_MAX_KMRN_RETRY 0x5 | |
- | |
-/* Power Management Control Register (Page 193, Register 20) */ | |
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 | |
- /* 1=Enable SERDES Electrical Idle */ | |
- | |
-/* In-Band Control Register (Page 194, Register 18) */ | |
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ | |
- | |
-/* | |
- * A table for the GG82563 cable length where the range is defined | |
+/* A table for the GG82563 cable length where the range is defined | |
* with a lower bound at "index" and the upper bound at | |
* "index + 5". | |
*/ | |
static const u16 e1000_gg82563_cable_length_table[] = { | |
- 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; | |
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF | |
+}; | |
+ | |
#define GG82563_CABLE_LENGTH_TABLE_SIZE \ | |
ARRAY_SIZE(e1000_gg82563_cable_length_table) | |
@@ -112,11 +50,10 @@ | |
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); | |
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); | |
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); | |
-static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); | |
-static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |
- u16 *data); | |
-static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |
- u16 data); | |
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |
+ u16 *data); | |
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |
+ u16 data); | |
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); | |
/** | |
@@ -129,17 +66,17 @@ | |
s32 ret_val; | |
if (hw->phy.media_type != e1000_media_type_copper) { | |
- phy->type = e1000_phy_none; | |
+ phy->type = e1000_phy_none; | |
return 0; | |
} else { | |
phy->ops.power_up = e1000_power_up_phy_copper; | |
phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; | |
} | |
- phy->addr = 1; | |
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | |
- phy->reset_delay_us = 100; | |
- phy->type = e1000_phy_gg82563; | |
+ phy->addr = 1; | |
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | |
+ phy->reset_delay_us = 100; | |
+ phy->type = e1000_phy_gg82563; | |
/* This can only be done after all function pointers are setup. */ | |
ret_val = e1000e_get_phy_id(hw); | |
@@ -161,19 +98,19 @@ | |
u32 eecd = er32(EECD); | |
u16 size; | |
- nvm->opcode_bits = 8; | |
- nvm->delay_usec = 1; | |
+ nvm->opcode_bits = 8; | |
+ nvm->delay_usec = 1; | |
switch (nvm->override) { | |
case e1000_nvm_override_spi_large: | |
- nvm->page_size = 32; | |
+ nvm->page_size = 32; | |
nvm->address_bits = 16; | |
break; | |
case e1000_nvm_override_spi_small: | |
- nvm->page_size = 8; | |
+ nvm->page_size = 8; | |
nvm->address_bits = 8; | |
break; | |
default: | |
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; | |
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; | |
nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; | |
break; | |
} | |
@@ -181,10 +118,9 @@ | |
nvm->type = e1000_nvm_eeprom_spi; | |
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> | |
- E1000_EECD_SIZE_EX_SHIFT); | |
+ E1000_EECD_SIZE_EX_SHIFT); | |
- /* | |
- * Added to a constant, "size" becomes the left-shift value | |
+ /* Added to a constant, "size" becomes the left-shift value | |
* for setting word_size. | |
*/ | |
size += NVM_WORD_SIZE_BASE_SHIFT; | |
@@ -192,7 +128,7 @@ | |
/* EEPROM access above 16k is unsupported */ | |
if (size > 14) | |
size = 14; | |
- nvm->word_size = 1 << size; | |
+ nvm->word_size = 1 << size; | |
return 0; | |
} | |
@@ -201,19 +137,23 @@ | |
* e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. | |
* @hw: pointer to the HW structure | |
**/ | |
-static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) | |
{ | |
- struct e1000_hw *hw = &adapter->hw; | |
struct e1000_mac_info *mac = &hw->mac; | |
- struct e1000_mac_operations *func = &mac->ops; | |
- /* Set media type */ | |
- switch (adapter->pdev->device) { | |
+ /* Set media type and media-dependent function pointers */ | |
+ switch (hw->adapter->pdev->device) { | |
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | |
hw->phy.media_type = e1000_media_type_internal_serdes; | |
+ mac->ops.check_for_link = e1000e_check_for_serdes_link; | |
+ mac->ops.setup_physical_interface = | |
+ e1000e_setup_fiber_serdes_link; | |
break; | |
default: | |
hw->phy.media_type = e1000_media_type_copper; | |
+ mac->ops.check_for_link = e1000e_check_for_copper_link; | |
+ mac->ops.setup_physical_interface = | |
+ e1000_setup_copper_link_80003es2lan; | |
break; | |
} | |
@@ -224,31 +164,10 @@ | |
/* FWSM register */ | |
mac->has_fwsm = true; | |
/* ARC supported; valid only if manageability features are enabled. */ | |
- mac->arc_subsystem_valid = | |
- (er32(FWSM) & E1000_FWSM_MODE_MASK) | |
- ? true : false; | |
+ mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); | |
/* Adaptive IFS not supported */ | |
mac->adaptive_ifs = false; | |
- /* check for link */ | |
- switch (hw->phy.media_type) { | |
- case e1000_media_type_copper: | |
- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; | |
- func->check_for_link = e1000e_check_for_copper_link; | |
- break; | |
- case e1000_media_type_fiber: | |
- func->setup_physical_interface = e1000e_setup_fiber_serdes_link; | |
- func->check_for_link = e1000e_check_for_fiber_link; | |
- break; | |
- case e1000_media_type_internal_serdes: | |
- func->setup_physical_interface = e1000e_setup_fiber_serdes_link; | |
- func->check_for_link = e1000e_check_for_serdes_link; | |
- break; | |
- default: | |
- return -E1000_ERR_CONFIG; | |
- break; | |
- } | |
- | |
/* set lan id for port to determine which phy lock to use */ | |
hw->mac.ops.set_lan_id(hw); | |
@@ -260,7 +179,7 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
s32 rc; | |
- rc = e1000_init_mac_params_80003es2lan(adapter); | |
+ rc = e1000_init_mac_params_80003es2lan(hw); | |
if (rc) | |
return rc; | |
@@ -304,7 +223,7 @@ | |
} | |
/** | |
- * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register | |
+ * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register | |
* @hw: pointer to the HW structure | |
* | |
* Acquire the semaphore to access the Kumeran interface. | |
@@ -320,7 +239,7 @@ | |
} | |
/** | |
- * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register | |
+ * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register | |
* @hw: pointer to the HW structure | |
* | |
* Release the semaphore used to access the Kumeran interface | |
@@ -392,8 +311,7 @@ | |
if (!(swfw_sync & (fwmask | swmask))) | |
break; | |
- /* | |
- * Firmware currently using resource (fwmask) | |
+ /* Firmware currently using resource (fwmask) | |
* or other software thread using resource (swmask) | |
*/ | |
e1000e_put_hw_semaphore(hw); | |
@@ -459,8 +377,7 @@ | |
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | |
page_select = GG82563_PHY_PAGE_SELECT; | |
} else { | |
- /* | |
- * Use Alternative Page Select register to access | |
+ /* Use Alternative Page Select register to access | |
* registers 30 and 31 | |
*/ | |
page_select = GG82563_PHY_PAGE_SELECT_ALT; | |
@@ -473,34 +390,32 @@ | |
return ret_val; | |
} | |
- if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { | |
- /* | |
- * The "ready" bit in the MDIC register may be incorrectly set | |
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { | |
+ /* The "ready" bit in the MDIC register may be incorrectly set | |
* before the device has completed the "Page Select" MDI | |
* transaction. So we wait 200us after each MDI command... | |
*/ | |
- udelay(200); | |
+ usleep_range(200, 400); | |
/* ...and verify the command was successful. */ | |
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); | |
if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { | |
- ret_val = -E1000_ERR_PHY; | |
e1000_release_phy_80003es2lan(hw); | |
- return ret_val; | |
+ return -E1000_ERR_PHY; | |
} | |
- udelay(200); | |
+ usleep_range(200, 400); | |
ret_val = e1000e_read_phy_reg_mdic(hw, | |
- MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
+ MAX_PHY_REG_ADDRESS & offset, | |
+ data); | |
- udelay(200); | |
+ usleep_range(200, 400); | |
} else { | |
ret_val = e1000e_read_phy_reg_mdic(hw, | |
- MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
+ MAX_PHY_REG_ADDRESS & offset, | |
+ data); | |
} | |
e1000_release_phy_80003es2lan(hw); | |
@@ -531,8 +446,7 @@ | |
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | |
page_select = GG82563_PHY_PAGE_SELECT; | |
} else { | |
- /* | |
- * Use Alternative Page Select register to access | |
+ /* Use Alternative Page Select register to access | |
* registers 30 and 31 | |
*/ | |
page_select = GG82563_PHY_PAGE_SELECT_ALT; | |
@@ -545,13 +459,12 @@ | |
return ret_val; | |
} | |
- if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { | |
- /* | |
- * The "ready" bit in the MDIC register may be incorrectly set | |
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { | |
+ /* The "ready" bit in the MDIC register may be incorrectly set | |
* before the device has completed the "Page Select" MDI | |
* transaction. So we wait 200us after each MDI command... | |
*/ | |
- udelay(200); | |
+ usleep_range(200, 400); | |
/* ...and verify the command was successful. */ | |
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); | |
@@ -561,17 +474,17 @@ | |
return -E1000_ERR_PHY; | |
} | |
- udelay(200); | |
+ usleep_range(200, 400); | |
ret_val = e1000e_write_phy_reg_mdic(hw, | |
- MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
+ MAX_PHY_REG_ADDRESS & | |
+ offset, data); | |
- udelay(200); | |
+ usleep_range(200, 400); | |
} else { | |
ret_val = e1000e_write_phy_reg_mdic(hw, | |
- MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
+ MAX_PHY_REG_ADDRESS & | |
+ offset, data); | |
} | |
e1000_release_phy_80003es2lan(hw); | |
@@ -636,8 +549,7 @@ | |
u16 phy_data; | |
bool link; | |
- /* | |
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | |
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | |
* forced whenever speed and duplex are forced. | |
*/ | |
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | |
@@ -651,33 +563,31 @@ | |
e_dbg("GG82563 PSCR: %X\n", phy_data); | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); | |
if (ret_val) | |
return ret_val; | |
e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | |
/* Reset the phy to commit changes. */ | |
- phy_data |= MII_CR_RESET; | |
+ phy_data |= BMCR_RESET; | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data); | |
if (ret_val) | |
return ret_val; | |
udelay(1); | |
if (hw->phy.autoneg_wait_to_complete) { | |
- e_dbg("Waiting for forced speed/duplex link " | |
- "on GG82563 phy.\n"); | |
+ e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); | |
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
- 100000, &link); | |
+ 100000, &link); | |
if (ret_val) | |
return ret_val; | |
if (!link) { | |
- /* | |
- * We didn't get link. | |
+ /* We didn't get link. | |
* Reset the DSP and cross our fingers. | |
*/ | |
ret_val = e1000e_phy_reset_dsp(hw); | |
@@ -687,7 +597,7 @@ | |
/* Try once more */ | |
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
- 100000, &link); | |
+ 100000, &link); | |
if (ret_val) | |
return ret_val; | |
} | |
@@ -696,8 +606,7 @@ | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Resetting the phy means we need to verify the TX_CLK corresponds | |
+ /* Resetting the phy means we need to verify the TX_CLK corresponds | |
* to the link speed. 10Mbps -> 2.5MHz, else 25MHz. | |
*/ | |
phy_data &= ~GG82563_MSCR_TX_CLK_MASK; | |
@@ -706,8 +615,7 @@ | |
else | |
phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; | |
- /* | |
- * In addition, we must re-enable CRS on Tx for both half and full | |
+ /* In addition, we must re-enable CRS on Tx for both half and full | |
* duplex. | |
*/ | |
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | |
@@ -726,27 +634,24 @@ | |
static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) | |
{ | |
struct e1000_phy_info *phy = &hw->phy; | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
u16 phy_data, index; | |
ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
index = phy_data & GG82563_DSPD_CABLE_LENGTH; | |
- if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { | |
- ret_val = -E1000_ERR_PHY; | |
- goto out; | |
- } | |
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) | |
+ return -E1000_ERR_PHY; | |
phy->min_cable_length = e1000_gg82563_cable_length_table[index]; | |
phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; | |
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -763,14 +668,12 @@ | |
s32 ret_val; | |
if (hw->phy.media_type == e1000_media_type_copper) { | |
- ret_val = e1000e_get_speed_and_duplex_copper(hw, | |
- speed, | |
- duplex); | |
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); | |
hw->phy.ops.cfg_on_link_up(hw); | |
} else { | |
ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, | |
- speed, | |
- duplex); | |
+ speed, | |
+ duplex); | |
} | |
return ret_val; | |
@@ -786,9 +689,9 @@ | |
{ | |
u32 ctrl; | |
s32 ret_val; | |
+ u16 kum_reg_data; | |
- /* | |
- * Prevent the PCI-E bus from sticking if there is no TLP connection | |
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection | |
* on the last TLP read/write transaction when MAC is reset. | |
*/ | |
ret_val = e1000e_disable_pcie_master(hw); | |
@@ -807,10 +710,23 @@ | |
ctrl = er32(CTRL); | |
ret_val = e1000_acquire_phy_80003es2lan(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
e_dbg("Issuing a global reset to MAC\n"); | |
ew32(CTRL, ctrl | E1000_CTRL_RST); | |
e1000_release_phy_80003es2lan(hw); | |
+ /* Disable IBIST slave mode (far-end loopback) */ | |
+ ret_val = | |
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | |
+ &kum_reg_data); | |
+ if (ret_val) | |
+ return ret_val; | |
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; | |
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | |
+ kum_reg_data); | |
+ | |
ret_val = e1000e_get_auto_rd_done(hw); | |
if (ret_val) | |
/* We don't want to continue accessing MAC registers. */ | |
@@ -820,9 +736,7 @@ | |
ew32(IMC, 0xffffffff); | |
er32(ICR); | |
- ret_val = e1000_check_alt_mac_addr_generic(hw); | |
- | |
- return ret_val; | |
+ return e1000_check_alt_mac_addr_generic(hw); | |
} | |
/** | |
@@ -842,10 +756,10 @@ | |
e1000_initialize_hw_bits_80003es2lan(hw); | |
/* Initialize identification LED */ | |
- ret_val = e1000e_id_led_init(hw); | |
+ ret_val = mac->ops.id_led_init(hw); | |
+ /* An error is not fatal and we should not stop init due to this */ | |
if (ret_val) | |
e_dbg("Error initializing identification LED\n"); | |
- /* This is not fatal and we should not stop init due to this */ | |
/* Disabling VLAN filtering */ | |
e_dbg("Initializing the IEEE VLAN\n"); | |
@@ -860,7 +774,9 @@ | |
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | |
/* Setup link and flow control */ | |
- ret_val = e1000e_setup_link(hw); | |
+ ret_val = mac->ops.setup_link(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
/* Disable IBIST slave mode (far-end loopback) */ | |
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | |
@@ -871,14 +787,14 @@ | |
/* Set the transmit descriptor write-back policy */ | |
reg_data = er32(TXDCTL(0)); | |
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; | |
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | | |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); | |
ew32(TXDCTL(0), reg_data); | |
/* ...for both queues. */ | |
reg_data = er32(TXDCTL(1)); | |
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; | |
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | | |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); | |
ew32(TXDCTL(1), reg_data); | |
/* Enable retransmit on late collisions */ | |
@@ -905,18 +821,16 @@ | |
/* default to true to enable the MDIC W/A */ | |
hw->dev_spec.e80003es2lan.mdic_wa_enable = true; | |
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET >> | |
- E1000_KMRNCTRLSTA_OFFSET_SHIFT, | |
- &i); | |
+ ret_val = | |
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> | |
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); | |
if (!ret_val) { | |
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == | |
- E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) | |
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) | |
hw->dev_spec.e80003es2lan.mdic_wa_enable = false; | |
} | |
- /* | |
- * Clear all of the statistics registers (clear on read). It is | |
+ /* Clear all of the statistics registers (clear on read). It is | |
* important that we do this after we have tried to establish link | |
* because the symbol error count will increment wildly if there | |
* is no link. | |
@@ -948,7 +862,7 @@ | |
/* Transmit Arbitration Control 0 */ | |
reg = er32(TARC(0)); | |
- reg &= ~(0xF << 27); /* 30:27 */ | |
+ reg &= ~(0xF << 27); /* 30:27 */ | |
if (hw->phy.media_type != e1000_media_type_copper) | |
reg &= ~(1 << 20); | |
ew32(TARC(0), reg); | |
@@ -960,6 +874,13 @@ | |
else | |
reg |= (1 << 28); | |
ew32(TARC(1), reg); | |
+ | |
+ /* Disable IPv6 extension header parsing because some malformed | |
+ * IPv6 headers can hang the Rx. | |
+ */ | |
+ reg = er32(RFCTL); | |
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); | |
+ ew32(RFCTL, reg); | |
} | |
/** | |
@@ -972,7 +893,7 @@ | |
{ | |
struct e1000_phy_info *phy = &hw->phy; | |
s32 ret_val; | |
- u32 ctrl_ext; | |
+ u32 reg; | |
u16 data; | |
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); | |
@@ -987,8 +908,7 @@ | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Options: | |
+ /* Options: | |
* MDI/MDI-X = 0 (default) | |
* 0 - Auto for all speeds | |
* 1 - MDI mode | |
@@ -1014,8 +934,7 @@ | |
break; | |
} | |
- /* | |
- * Options: | |
+ /* Options: | |
* disable_polarity_correction = 0 (default) | |
* Automatic Correction for Reversed Cable Polarity | |
* 0 - Disabled | |
@@ -1030,29 +949,26 @@ | |
return ret_val; | |
/* SW Reset the PHY so all changes take effect */ | |
- ret_val = e1000e_commit_phy(hw); | |
+ ret_val = hw->phy.ops.commit(hw); | |
if (ret_val) { | |
e_dbg("Error Resetting the PHY\n"); | |
return ret_val; | |
} | |
/* Bypass Rx and Tx FIFO's */ | |
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, | |
- E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | | |
- E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); | |
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; | |
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | | |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); | |
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); | |
if (ret_val) | |
return ret_val; | |
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, | |
- &data); | |
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; | |
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); | |
if (ret_val) | |
return ret_val; | |
data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; | |
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, | |
- data); | |
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); | |
if (ret_val) | |
return ret_val; | |
@@ -1065,20 +981,19 @@ | |
if (ret_val) | |
return ret_val; | |
- ctrl_ext = er32(CTRL_EXT); | |
- ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); | |
- ew32(CTRL_EXT, ctrl_ext); | |
+ reg = er32(CTRL_EXT); | |
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; | |
+ ew32(CTRL_EXT, reg); | |
ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Do not init these registers when the HW is in IAMT mode, since the | |
+ /* Do not init these registers when the HW is in IAMT mode, since the | |
* firmware will have already initialized them. We only initialize | |
* them if the HW is not in IAMT mode. | |
*/ | |
- if (!e1000e_check_mng_mode(hw)) { | |
+ if (!hw->mac.ops.check_mng_mode(hw)) { | |
/* Enable Electrical Idle on the PHY */ | |
data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; | |
ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); | |
@@ -1095,8 +1010,7 @@ | |
return ret_val; | |
} | |
- /* | |
- * Workaround: Disable padding in Kumeran interface in the MAC | |
+ /* Workaround: Disable padding in Kumeran interface in the MAC | |
* and in the PHY to avoid CRC errors. | |
*/ | |
ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); | |
@@ -1129,33 +1043,34 @@ | |
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | |
ew32(CTRL, ctrl); | |
- /* | |
- * Set the mac to wait the maximum time between each | |
+ /* Set the mac to wait the maximum time between each | |
* iteration and increase the max iterations when | |
* polling the phy; this fixes erroneous timeouts at 10Mbps. | |
*/ | |
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), | |
- 0xFFFF); | |
+ 0xFFFF); | |
if (ret_val) | |
return ret_val; | |
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), | |
- ®_data); | |
+ ®_data); | |
if (ret_val) | |
return ret_val; | |
reg_data |= 0x3F; | |
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), | |
- reg_data); | |
+ reg_data); | |
if (ret_val) | |
return ret_val; | |
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | |
- ®_data); | |
+ ret_val = | |
+ e1000_read_kmrn_reg_80003es2lan(hw, | |
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | |
+ ®_data); | |
if (ret_val) | |
return ret_val; | |
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; | |
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | |
- reg_data); | |
+ ret_val = | |
+ e1000_write_kmrn_reg_80003es2lan(hw, | |
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | |
+ reg_data); | |
if (ret_val) | |
return ret_val; | |
@@ -1163,9 +1078,7 @@ | |
if (ret_val) | |
return ret_val; | |
- ret_val = e1000e_setup_copper_link(hw); | |
- | |
- return 0; | |
+ return e1000e_setup_copper_link(hw); | |
} | |
/** | |
@@ -1184,7 +1097,7 @@ | |
if (hw->phy.media_type == e1000_media_type_copper) { | |
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, | |
- &duplex); | |
+ &duplex); | |
if (ret_val) | |
return ret_val; | |
@@ -1213,9 +1126,10 @@ | |
u16 reg_data, reg_data2; | |
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; | |
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | |
- reg_data); | |
+ ret_val = | |
+ e1000_write_kmrn_reg_80003es2lan(hw, | |
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | |
+ reg_data); | |
if (ret_val) | |
return ret_val; | |
@@ -1241,9 +1155,7 @@ | |
else | |
reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | |
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | |
- | |
- return 0; | |
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | |
} | |
/** | |
@@ -1261,9 +1173,10 @@ | |
u32 i = 0; | |
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; | |
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | |
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | |
- reg_data); | |
+ ret_val = | |
+ e1000_write_kmrn_reg_80003es2lan(hw, | |
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | |
+ reg_data); | |
if (ret_val) | |
return ret_val; | |
@@ -1285,9 +1198,8 @@ | |
} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); | |
reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | |
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | |
- return ret_val; | |
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | |
} | |
/** | |
@@ -1304,14 +1216,14 @@ | |
u16 *data) | |
{ | |
u32 kmrnctrlsta; | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
ret_val = e1000_acquire_mac_csr_80003es2lan(hw); | |
if (ret_val) | |
return ret_val; | |
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | |
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | |
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | |
ew32(KMRNCTRLSTA, kmrnctrlsta); | |
e1e_flush(); | |
@@ -1339,14 +1251,14 @@ | |
u16 data) | |
{ | |
u32 kmrnctrlsta; | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
ret_val = e1000_acquire_mac_csr_80003es2lan(hw); | |
if (ret_val) | |
return ret_val; | |
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | |
- E1000_KMRNCTRLSTA_OFFSET) | data; | |
+ E1000_KMRNCTRLSTA_OFFSET) | data; | |
ew32(KMRNCTRLSTA, kmrnctrlsta); | |
e1e_flush(); | |
@@ -1363,21 +1275,17 @@ | |
**/ | |
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) | |
{ | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
- /* | |
- * If there's an alternate MAC address place it in RAR0 | |
+ /* If there's an alternate MAC address place it in RAR0 | |
* so that it will override the Si installed default perm | |
* address. | |
*/ | |
ret_val = e1000_check_alt_mac_addr_generic(hw); | |
if (ret_val) | |
- goto out; | |
- | |
- ret_val = e1000_read_mac_addr_generic(hw); | |
+ return ret_val; | |
-out: | |
- return ret_val; | |
+ return e1000_read_mac_addr_generic(hw); | |
} | |
/** | |
@@ -1443,7 +1351,7 @@ | |
static const struct e1000_mac_operations es2_mac_ops = { | |
.read_mac_addr = e1000_read_mac_addr_80003es2lan, | |
- .id_led_init = e1000e_id_led_init, | |
+ .id_led_init = e1000e_id_led_init_generic, | |
.blink_led = e1000e_blink_led_generic, | |
.check_mng_mode = e1000e_check_mng_mode_generic, | |
/* check_for_link dependent on media type */ | |
@@ -1459,33 +1367,36 @@ | |
.clear_vfta = e1000_clear_vfta_generic, | |
.reset_hw = e1000_reset_hw_80003es2lan, | |
.init_hw = e1000_init_hw_80003es2lan, | |
- .setup_link = e1000e_setup_link, | |
+ .setup_link = e1000e_setup_link_generic, | |
/* setup_physical_interface dependent on media type */ | |
.setup_led = e1000e_setup_led_generic, | |
+ .config_collision_dist = e1000e_config_collision_dist_generic, | |
+ .rar_set = e1000e_rar_set_generic, | |
}; | |
static const struct e1000_phy_operations es2_phy_ops = { | |
.acquire = e1000_acquire_phy_80003es2lan, | |
.check_polarity = e1000_check_polarity_m88, | |
.check_reset_block = e1000e_check_reset_block_generic, | |
- .commit = e1000e_phy_sw_reset, | |
- .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, | |
- .get_cfg_done = e1000_get_cfg_done_80003es2lan, | |
- .get_cable_length = e1000_get_cable_length_80003es2lan, | |
- .get_info = e1000e_get_phy_info_m88, | |
- .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, | |
+ .commit = e1000e_phy_sw_reset, | |
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, | |
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan, | |
+ .get_cable_length = e1000_get_cable_length_80003es2lan, | |
+ .get_info = e1000e_get_phy_info_m88, | |
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, | |
.release = e1000_release_phy_80003es2lan, | |
- .reset = e1000e_phy_hw_reset_generic, | |
- .set_d0_lplu_state = NULL, | |
- .set_d3_lplu_state = e1000e_set_d3_lplu_state, | |
- .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, | |
- .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, | |
+ .reset = e1000e_phy_hw_reset_generic, | |
+ .set_d0_lplu_state = NULL, | |
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state, | |
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, | |
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, | |
}; | |
static const struct e1000_nvm_operations es2_nvm_ops = { | |
.acquire = e1000_acquire_nvm_80003es2lan, | |
.read = e1000e_read_nvm_eerd, | |
.release = e1000_release_nvm_80003es2lan, | |
+ .reload = e1000e_reload_nvm_generic, | |
.update = e1000e_update_nvm_checksum_generic, | |
.valid_led_default = e1000e_valid_led_default, | |
.validate = e1000e_validate_nvm_checksum_generic, | |
@@ -1502,8 +1413,7 @@ | |
| FLAG_RX_NEEDS_RESTART /* errata */ | |
| FLAG_TARC_SET_BIT_ZERO /* errata */ | |
| FLAG_APME_CHECK_PORT_B | |
- | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | |
- | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, | |
+ | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */ | |
.flags2 = FLAG2_DMA_BURST, | |
.pba = 38, | |
.max_hw_frame_size = DEFAULT_JUMBO, | |
@@ -1512,4 +1422,3 @@ | |
.phy_ops = &es2_phy_ops, | |
.nvm_ops = &es2_nvm_ops, | |
}; | |
- | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: 80003es2lan.h | |
diff -ru e1000e/82571.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/82571.c | |
--- e1000e/82571.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/82571.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -26,8 +26,7 @@ | |
*******************************************************************************/ | |
-/* | |
- * 82571EB Gigabit Ethernet Controller | |
+/* 82571EB Gigabit Ethernet Controller | |
* 82571EB Gigabit Ethernet Controller (Copper) | |
* 82571EB Gigabit Ethernet Controller (Fiber) | |
* 82571EB Dual Port Gigabit Mezzanine Adapter | |
@@ -45,21 +44,6 @@ | |
#include "e1000.h" | |
-#define ID_LED_RESERVED_F746 0xF746 | |
-#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ | |
- (ID_LED_OFF1_ON2 << 8) | \ | |
- (ID_LED_DEF1_DEF2 << 4) | \ | |
- (ID_LED_DEF1_DEF2)) | |
- | |
-#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 | |
-#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ | |
-#define E1000_BASE1000T_STATUS 10 | |
-#define E1000_IDLE_ERROR_COUNT_MASK 0xFF | |
-#define E1000_RECEIVE_ERROR_COUNTER 21 | |
-#define E1000_RECEIVE_ERROR_MAX 0xFFFF | |
- | |
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ | |
- | |
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); | |
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); | |
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); | |
@@ -68,9 +52,7 @@ | |
u16 words, u16 *data); | |
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); | |
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); | |
-static s32 e1000_setup_link_82571(struct e1000_hw *hw); | |
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); | |
-static void e1000_clear_vfta_82571(struct e1000_hw *hw); | |
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); | |
static s32 e1000_led_on_82574(struct e1000_hw *hw); | |
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); | |
@@ -95,24 +77,24 @@ | |
return 0; | |
} | |
- phy->addr = 1; | |
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | |
- phy->reset_delay_us = 100; | |
+ phy->addr = 1; | |
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | |
+ phy->reset_delay_us = 100; | |
- phy->ops.power_up = e1000_power_up_phy_copper; | |
- phy->ops.power_down = e1000_power_down_phy_copper_82571; | |
+ phy->ops.power_up = e1000_power_up_phy_copper; | |
+ phy->ops.power_down = e1000_power_down_phy_copper_82571; | |
switch (hw->mac.type) { | |
case e1000_82571: | |
case e1000_82572: | |
- phy->type = e1000_phy_igp_2; | |
+ phy->type = e1000_phy_igp_2; | |
break; | |
case e1000_82573: | |
- phy->type = e1000_phy_m88; | |
+ phy->type = e1000_phy_m88; | |
break; | |
case e1000_82574: | |
case e1000_82583: | |
- phy->type = e1000_phy_bm; | |
+ phy->type = e1000_phy_bm; | |
phy->ops.acquire = e1000_get_hw_semaphore_82574; | |
phy->ops.release = e1000_put_hw_semaphore_82574; | |
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; | |
@@ -191,8 +173,7 @@ | |
if (((eecd >> 15) & 0x3) == 0x3) { | |
nvm->type = e1000_nvm_flash_hw; | |
nvm->word_size = 2048; | |
- /* | |
- * Autonomous Flash update bit must be cleared due | |
+ /* Autonomous Flash update bit must be cleared due | |
* to Flash update issue. | |
*/ | |
eecd &= ~E1000_EECD_AUPDEN; | |
@@ -203,9 +184,8 @@ | |
default: | |
nvm->type = e1000_nvm_eeprom_spi; | |
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> | |
- E1000_EECD_SIZE_EX_SHIFT); | |
- /* | |
- * Added to a constant, "size" becomes the left-shift value | |
+ E1000_EECD_SIZE_EX_SHIFT); | |
+ /* Added to a constant, "size" becomes the left-shift value | |
* for setting word_size. | |
*/ | |
size += NVM_WORD_SIZE_BASE_SHIFT; | |
@@ -213,7 +193,7 @@ | |
/* EEPROM access above 16k is unsupported */ | |
if (size > 14) | |
size = 14; | |
- nvm->word_size = 1 << size; | |
+ nvm->word_size = 1 << size; | |
break; | |
} | |
@@ -235,30 +215,42 @@ | |
* e1000_init_mac_params_82571 - Init MAC func ptrs. | |
* @hw: pointer to the HW structure | |
**/ | |
-static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |
+static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) | |
{ | |
- struct e1000_hw *hw = &adapter->hw; | |
struct e1000_mac_info *mac = &hw->mac; | |
- struct e1000_mac_operations *func = &mac->ops; | |
u32 swsm = 0; | |
u32 swsm2 = 0; | |
bool force_clear_smbi = false; | |
- /* Set media type */ | |
- switch (adapter->pdev->device) { | |
+ /* Set media type and media-dependent function pointers */ | |
+ switch (hw->adapter->pdev->device) { | |
case E1000_DEV_ID_82571EB_FIBER: | |
case E1000_DEV_ID_82572EI_FIBER: | |
case E1000_DEV_ID_82571EB_QUAD_FIBER: | |
hw->phy.media_type = e1000_media_type_fiber; | |
+ mac->ops.setup_physical_interface = | |
+ e1000_setup_fiber_serdes_link_82571; | |
+ mac->ops.check_for_link = e1000e_check_for_fiber_link; | |
+ mac->ops.get_link_up_info = | |
+ e1000e_get_speed_and_duplex_fiber_serdes; | |
break; | |
case E1000_DEV_ID_82571EB_SERDES: | |
- case E1000_DEV_ID_82572EI_SERDES: | |
case E1000_DEV_ID_82571EB_SERDES_DUAL: | |
case E1000_DEV_ID_82571EB_SERDES_QUAD: | |
+ case E1000_DEV_ID_82572EI_SERDES: | |
hw->phy.media_type = e1000_media_type_internal_serdes; | |
+ mac->ops.setup_physical_interface = | |
+ e1000_setup_fiber_serdes_link_82571; | |
+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571; | |
+ mac->ops.get_link_up_info = | |
+ e1000e_get_speed_and_duplex_fiber_serdes; | |
break; | |
default: | |
hw->phy.media_type = e1000_media_type_copper; | |
+ mac->ops.setup_physical_interface = | |
+ e1000_setup_copper_link_82571; | |
+ mac->ops.check_for_link = e1000e_check_for_copper_link; | |
+ mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; | |
break; | |
} | |
@@ -269,67 +261,39 @@ | |
/* Adaptive IFS supported */ | |
mac->adaptive_ifs = true; | |
- /* check for link */ | |
- switch (hw->phy.media_type) { | |
- case e1000_media_type_copper: | |
- func->setup_physical_interface = e1000_setup_copper_link_82571; | |
- func->check_for_link = e1000e_check_for_copper_link; | |
- func->get_link_up_info = e1000e_get_speed_and_duplex_copper; | |
- break; | |
- case e1000_media_type_fiber: | |
- func->setup_physical_interface = | |
- e1000_setup_fiber_serdes_link_82571; | |
- func->check_for_link = e1000e_check_for_fiber_link; | |
- func->get_link_up_info = | |
- e1000e_get_speed_and_duplex_fiber_serdes; | |
- break; | |
- case e1000_media_type_internal_serdes: | |
- func->setup_physical_interface = | |
- e1000_setup_fiber_serdes_link_82571; | |
- func->check_for_link = e1000_check_for_serdes_link_82571; | |
- func->get_link_up_info = | |
- e1000e_get_speed_and_duplex_fiber_serdes; | |
- break; | |
- default: | |
- return -E1000_ERR_CONFIG; | |
- break; | |
- } | |
- | |
+ /* MAC-specific function pointers */ | |
switch (hw->mac.type) { | |
case e1000_82573: | |
- func->set_lan_id = e1000_set_lan_id_single_port; | |
- func->check_mng_mode = e1000e_check_mng_mode_generic; | |
- func->led_on = e1000e_led_on_generic; | |
- func->blink_led = e1000e_blink_led_generic; | |
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port; | |
+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; | |
+ mac->ops.led_on = e1000e_led_on_generic; | |
+ mac->ops.blink_led = e1000e_blink_led_generic; | |
/* FWSM register */ | |
mac->has_fwsm = true; | |
- /* | |
- * ARC supported; valid only if manageability features are | |
+ /* ARC supported; valid only if manageability features are | |
* enabled. | |
*/ | |
- mac->arc_subsystem_valid = | |
- (er32(FWSM) & E1000_FWSM_MODE_MASK) | |
- ? true : false; | |
+ mac->arc_subsystem_valid = !!(er32(FWSM) & | |
+ E1000_FWSM_MODE_MASK); | |
break; | |
case e1000_82574: | |
case e1000_82583: | |
- func->set_lan_id = e1000_set_lan_id_single_port; | |
- func->check_mng_mode = e1000_check_mng_mode_82574; | |
- func->led_on = e1000_led_on_82574; | |
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port; | |
+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574; | |
+ mac->ops.led_on = e1000_led_on_82574; | |
break; | |
default: | |
- func->check_mng_mode = e1000e_check_mng_mode_generic; | |
- func->led_on = e1000e_led_on_generic; | |
- func->blink_led = e1000e_blink_led_generic; | |
+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; | |
+ mac->ops.led_on = e1000e_led_on_generic; | |
+ mac->ops.blink_led = e1000e_blink_led_generic; | |
/* FWSM register */ | |
mac->has_fwsm = true; | |
break; | |
} | |
- /* | |
- * Ensure that the inter-port SWSM.SMBI lock bit is clear before | |
+ /* Ensure that the inter-port SWSM.SMBI lock bit is clear before | |
* first NVM or PHY access. This should be done for single-port | |
* devices, and for one port only on dual-port devices so that | |
* for those devices we can still use the SMBI lock to synchronize | |
@@ -342,11 +306,11 @@ | |
if (!(swsm2 & E1000_SWSM2_LOCK)) { | |
/* Only do this for the first interface on this card */ | |
- ew32(SWSM2, | |
- swsm2 | E1000_SWSM2_LOCK); | |
+ ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); | |
force_clear_smbi = true; | |
- } else | |
+ } else { | |
force_clear_smbi = false; | |
+ } | |
break; | |
default: | |
force_clear_smbi = true; | |
@@ -366,11 +330,8 @@ | |
ew32(SWSM, swsm & ~E1000_SWSM_SMBI); | |
} | |
- /* | |
- * Initialize device specific counter of SMBI acquisition | |
- * timeouts. | |
- */ | |
- hw->dev_spec.e82571.smb_counter = 0; | |
+ /* Initialize device specific counter of SMBI acquisition timeouts. */ | |
+ hw->dev_spec.e82571.smb_counter = 0; | |
return 0; | |
} | |
@@ -378,12 +339,12 @@ | |
static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) | |
{ | |
struct e1000_hw *hw = &adapter->hw; | |
- static int global_quad_port_a; /* global port a indication */ | |
+ static int global_quad_port_a; /* global port a indication */ | |
struct pci_dev *pdev = adapter->pdev; | |
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; | |
s32 rc; | |
- rc = e1000_init_mac_params_82571(adapter); | |
+ rc = e1000_init_mac_params_82571(hw); | |
if (rc) | |
return rc; | |
@@ -459,8 +420,7 @@ | |
switch (hw->mac.type) { | |
case e1000_82571: | |
case e1000_82572: | |
- /* | |
- * The 82571 firmware may still be configuring the PHY. | |
+ /* The 82571 firmware may still be configuring the PHY. | |
* In this case, we cannot access the PHY until the | |
* configuration is done. So we explicitly set the | |
* PHY ID. | |
@@ -472,13 +432,13 @@ | |
break; | |
case e1000_82574: | |
case e1000_82583: | |
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); | |
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); | |
if (ret_val) | |
return ret_val; | |
phy->id = (u32)(phy_id << 16); | |
- udelay(20); | |
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); | |
+ usleep_range(20, 40); | |
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); | |
if (ret_val) | |
return ret_val; | |
@@ -506,8 +466,7 @@ | |
s32 fw_timeout = hw->nvm.word_size + 1; | |
s32 i = 0; | |
- /* | |
- * If we have timedout 3 times on trying to acquire | |
+ /* If we have timedout 3 times on trying to acquire | |
* the inter-port SMBI semaphore, there is old code | |
* operating on the other port, and it is not | |
* releasing SMBI. Modify the number of times that | |
@@ -523,7 +482,7 @@ | |
if (!(swsm & E1000_SWSM_SMBI)) | |
break; | |
- udelay(50); | |
+ usleep_range(50, 100); | |
i++; | |
} | |
@@ -540,7 +499,7 @@ | |
if (er32(SWSM) & E1000_SWSM_SWESMBI) | |
break; | |
- udelay(50); | |
+ usleep_range(50, 100); | |
} | |
if (i == fw_timeout) { | |
@@ -567,6 +526,7 @@ | |
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | |
ew32(SWSM, swsm); | |
} | |
+ | |
/** | |
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore | |
* @hw: pointer to the HW structure | |
@@ -577,20 +537,17 @@ | |
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) | |
{ | |
u32 extcnf_ctrl; | |
- s32 ret_val = 0; | |
s32 i = 0; | |
extcnf_ctrl = er32(EXTCNF_CTRL); | |
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | |
do { | |
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | |
ew32(EXTCNF_CTRL, extcnf_ctrl); | |
extcnf_ctrl = er32(EXTCNF_CTRL); | |
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) | |
break; | |
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | |
- | |
usleep_range(2000, 4000); | |
i++; | |
} while (i < MDIO_OWNERSHIP_TIMEOUT); | |
@@ -599,12 +556,10 @@ | |
/* Release semaphores */ | |
e1000_put_hw_semaphore_82573(hw); | |
e_dbg("Driver can't access the PHY\n"); | |
- ret_val = -E1000_ERR_PHY; | |
- goto out; | |
+ return -E1000_ERR_PHY; | |
} | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -670,7 +625,7 @@ | |
**/ | |
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) | |
{ | |
- u16 data = er32(POEMB); | |
+ u32 data = er32(POEMB); | |
if (active) | |
data |= E1000_PHY_CTRL_D0A_LPLU; | |
@@ -694,7 +649,7 @@ | |
**/ | |
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) | |
{ | |
- u16 data = er32(POEMB); | |
+ u32 data = er32(POEMB); | |
if (!active) { | |
data &= ~E1000_PHY_CTRL_NOND0A_LPLU; | |
@@ -804,17 +759,16 @@ | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * If our nvm is an EEPROM, then we're done | |
+ /* If our nvm is an EEPROM, then we're done | |
* otherwise, commit the checksum to the flash NVM. | |
*/ | |
if (hw->nvm.type != e1000_nvm_flash_hw) | |
- return ret_val; | |
+ return 0; | |
/* Check for pending operations. */ | |
for (i = 0; i < E1000_FLASH_UPDATES; i++) { | |
usleep_range(1000, 2000); | |
- if ((er32(EECD) & E1000_EECD_FLUPD) == 0) | |
+ if (!(er32(EECD) & E1000_EECD_FLUPD)) | |
break; | |
} | |
@@ -823,8 +777,7 @@ | |
/* Reset the firmware if using STM opcode. */ | |
if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { | |
- /* | |
- * The enabling of and the actual reset must be done | |
+ /* The enabling of and the actual reset must be done | |
* in two write cycles. | |
*/ | |
ew32(HICR, E1000_HICR_FW_RESET_ENABLE); | |
@@ -838,7 +791,7 @@ | |
for (i = 0; i < E1000_FLASH_UPDATES; i++) { | |
usleep_range(1000, 2000); | |
- if ((er32(EECD) & E1000_EECD_FLUPD) == 0) | |
+ if (!(er32(EECD) & E1000_EECD_FLUPD)) | |
break; | |
} | |
@@ -884,8 +837,7 @@ | |
u32 i, eewr = 0; | |
s32 ret_val = 0; | |
- /* | |
- * A check for invalid values: offset too large, too many words, | |
+ /* A check for invalid values: offset too large, too many words, | |
* and not enough words. | |
*/ | |
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | |
@@ -895,9 +847,9 @@ | |
} | |
for (i = 0; i < words; i++) { | |
- eewr = (data[i] << E1000_NVM_RW_REG_DATA) | | |
- ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | | |
- E1000_NVM_RW_REG_START; | |
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | | |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | | |
+ E1000_NVM_RW_REG_START); | |
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); | |
if (ret_val) | |
@@ -924,8 +876,7 @@ | |
s32 timeout = PHY_CFG_TIMEOUT; | |
while (timeout) { | |
- if (er32(EEMNGCTL) & | |
- E1000_NVM_CFG_DONE_PORT_0) | |
+ if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) | |
break; | |
usleep_range(1000, 2000); | |
timeout--; | |
@@ -967,6 +918,8 @@ | |
/* When LPLU is enabled, we should disable SmartSpeed */ | |
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); | |
+ if (ret_val) | |
+ return ret_val; | |
data &= ~IGP01E1000_PSCFR_SMART_SPEED; | |
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | |
if (ret_val) | |
@@ -974,8 +927,7 @@ | |
} else { | |
data &= ~IGP02E1000_PM_D0_LPLU; | |
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | |
- /* | |
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
* during Dx states where the power conservation is most | |
* important. During driver activity we should enable | |
* SmartSpeed, so performance is maintained. | |
@@ -1016,11 +968,10 @@ | |
**/ | |
static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |
{ | |
- u32 ctrl, ctrl_ext; | |
+ u32 ctrl, ctrl_ext, eecd, tctl; | |
s32 ret_val; | |
- /* | |
- * Prevent the PCI-E bus from sticking if there is no TLP connection | |
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection | |
* on the last TLP read/write transaction when MAC is reset. | |
*/ | |
ret_val = e1000e_disable_pcie_master(hw); | |
@@ -1031,13 +982,14 @@ | |
ew32(IMC, 0xffffffff); | |
ew32(RCTL, 0); | |
- ew32(TCTL, E1000_TCTL_PSP); | |
+ tctl = er32(TCTL); | |
+ tctl &= ~E1000_TCTL_EN; | |
+ ew32(TCTL, tctl); | |
e1e_flush(); | |
usleep_range(10000, 20000); | |
- /* | |
- * Must acquire the MDIO ownership before MAC reset. | |
+ /* Must acquire the MDIO ownership before MAC reset. | |
* Ownership defaults to firmware after a reset. | |
*/ | |
switch (hw->mac.type) { | |
@@ -1051,8 +1003,6 @@ | |
default: | |
break; | |
} | |
- if (ret_val) | |
- e_dbg("Cannot acquire MDIO ownership\n"); | |
ctrl = er32(CTRL); | |
@@ -1061,16 +1011,23 @@ | |
/* Must release MDIO ownership and mutex after MAC reset. */ | |
switch (hw->mac.type) { | |
+ case e1000_82573: | |
+ /* Release mutex only if the hw semaphore is acquired */ | |
+ if (!ret_val) | |
+ e1000_put_hw_semaphore_82573(hw); | |
+ break; | |
case e1000_82574: | |
case e1000_82583: | |
- e1000_put_hw_semaphore_82574(hw); | |
+ /* Release mutex only if the hw semaphore is acquired */ | |
+ if (!ret_val) | |
+ e1000_put_hw_semaphore_82574(hw); | |
break; | |
default: | |
break; | |
} | |
if (hw->nvm.type == e1000_nvm_flash_hw) { | |
- udelay(10); | |
+ usleep_range(10, 20); | |
ctrl_ext = er32(CTRL_EXT); | |
ctrl_ext |= E1000_CTRL_EXT_EE_RST; | |
ew32(CTRL_EXT, ctrl_ext); | |
@@ -1082,13 +1039,21 @@ | |
/* We don't want to continue accessing MAC registers. */ | |
return ret_val; | |
- /* | |
- * Phy configuration from NVM just starts after EECD_AUTO_RD is set. | |
+ /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. | |
* Need to wait for Phy configuration completion before accessing | |
* NVM and Phy. | |
*/ | |
switch (hw->mac.type) { | |
+ case e1000_82571: | |
+ case e1000_82572: | |
+ /* REQ and GNT bits need to be cleared when using AUTO_RD | |
+ * to access the EEPROM. | |
+ */ | |
+ eecd = er32(EECD); | |
+ eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); | |
+ ew32(EECD, eecd); | |
+ break; | |
case e1000_82573: | |
case e1000_82574: | |
case e1000_82583: | |
@@ -1134,17 +1099,16 @@ | |
e1000_initialize_hw_bits_82571(hw); | |
/* Initialize identification LED */ | |
- ret_val = e1000e_id_led_init(hw); | |
+ ret_val = mac->ops.id_led_init(hw); | |
+ /* An error is not fatal and we should not stop init due to this */ | |
if (ret_val) | |
e_dbg("Error initializing identification LED\n"); | |
- /* This is not fatal and we should not stop init due to this */ | |
/* Disabling VLAN filtering */ | |
e_dbg("Initializing the IEEE VLAN\n"); | |
mac->ops.clear_vfta(hw); | |
- /* Setup the receive address. */ | |
- /* | |
+ /* Setup the receive address. | |
* If, however, a locally administered address was assigned to the | |
* 82571, we must reserve a RAR for it to work around an issue where | |
* resetting one port will reload the MAC on the other port. | |
@@ -1159,13 +1123,12 @@ | |
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | |
/* Setup link and flow control */ | |
- ret_val = e1000_setup_link_82571(hw); | |
+ ret_val = mac->ops.setup_link(hw); | |
/* Set the transmit descriptor write-back policy */ | |
reg_data = er32(TXDCTL(0)); | |
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | |
- E1000_TXDCTL_FULL_TX_DESC_WB | | |
- E1000_TXDCTL_COUNT_DESC; | |
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | | |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); | |
ew32(TXDCTL(0), reg_data); | |
/* ...for both queues. */ | |
@@ -1181,15 +1144,14 @@ | |
break; | |
default: | |
reg_data = er32(TXDCTL(1)); | |
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | |
- E1000_TXDCTL_FULL_TX_DESC_WB | | |
- E1000_TXDCTL_COUNT_DESC; | |
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | | |
+ E1000_TXDCTL_FULL_TX_DESC_WB | | |
+ E1000_TXDCTL_COUNT_DESC); | |
ew32(TXDCTL(1), reg_data); | |
break; | |
} | |
- /* | |
- * Clear all of the statistics registers (clear on read). It is | |
+ /* Clear all of the statistics registers (clear on read). It is | |
* important that we do this after we have tried to establish link | |
* because the symbol error count will increment wildly if there | |
* is no link. | |
@@ -1221,12 +1183,16 @@ | |
/* Transmit Arbitration Control 0 */ | |
reg = er32(TARC(0)); | |
- reg &= ~(0xF << 27); /* 30:27 */ | |
+ reg &= ~(0xF << 27); /* 30:27 */ | |
switch (hw->mac.type) { | |
case e1000_82571: | |
case e1000_82572: | |
reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); | |
break; | |
+ case e1000_82574: | |
+ case e1000_82583: | |
+ reg |= (1 << 26); | |
+ break; | |
default: | |
break; | |
} | |
@@ -1281,18 +1247,24 @@ | |
reg |= E1000_PBA_ECC_CORR_EN; | |
ew32(PBA_ECC, reg); | |
} | |
- /* | |
- * Workaround for hardware errata. | |
+ | |
+ /* Workaround for hardware errata. | |
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 | |
*/ | |
+ if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { | |
+ reg = er32(CTRL_EXT); | |
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; | |
+ ew32(CTRL_EXT, reg); | |
+ } | |
- if ((hw->mac.type == e1000_82571) || | |
- (hw->mac.type == e1000_82572)) { | |
- reg = er32(CTRL_EXT); | |
- reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; | |
- ew32(CTRL_EXT, reg); | |
- } | |
- | |
+ /* Disable IPv6 extension header parsing because some malformed | |
+ * IPv6 headers can hang the Rx. | |
+ */ | |
+ if (hw->mac.type <= e1000_82573) { | |
+ reg = er32(RFCTL); | |
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); | |
+ ew32(RFCTL, reg); | |
+ } | |
/* PCI-Ex Control Registers */ | |
switch (hw->mac.type) { | |
@@ -1302,8 +1274,7 @@ | |
reg |= (1 << 22); | |
ew32(GCR, reg); | |
- /* | |
- * Workaround for hardware errata. | |
+ /* Workaround for hardware errata. | |
* apply workaround for hardware errata documented in errata | |
* docs Fixes issue where some error prone or unreliable PCIe | |
* completions are occurring, particularly with ASPM enabled. | |
@@ -1337,8 +1308,7 @@ | |
case e1000_82574: | |
case e1000_82583: | |
if (hw->mng_cookie.vlan_id != 0) { | |
- /* | |
- * The VFTA is a 4096b bit-field, each identifying | |
+ /* The VFTA is a 4096b bit-field, each identifying | |
* a single VLAN ID. The following operations | |
* determine which 32b entry (i.e. offset) into the | |
* array we want to set the VLAN ID (i.e. bit) of | |
@@ -1346,17 +1316,17 @@ | |
*/ | |
vfta_offset = (hw->mng_cookie.vlan_id >> | |
E1000_VFTA_ENTRY_SHIFT) & | |
- E1000_VFTA_ENTRY_MASK; | |
- vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & | |
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK); | |
+ E1000_VFTA_ENTRY_MASK; | |
+ vfta_bit_in_reg = | |
+ 1 << (hw->mng_cookie.vlan_id & | |
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK); | |
} | |
break; | |
default: | |
break; | |
} | |
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | |
- /* | |
- * If the offset we want to clear is the same offset of the | |
+ /* If the offset we want to clear is the same offset of the | |
* manageability VLAN ID, then clear all bits except that of | |
* the manageability unit. | |
*/ | |
@@ -1394,8 +1364,7 @@ | |
ctrl = hw->mac.ledctl_mode2; | |
if (!(E1000_STATUS_LU & er32(STATUS))) { | |
- /* | |
- * If no link, then turn LED on by setting the invert bit | |
+ /* If no link, then turn LED on by setting the invert bit | |
* for each LED that's "on" (0x0E) in ledctl_mode2. | |
*/ | |
for (i = 0; i < 4; i++) | |
@@ -1418,27 +1387,24 @@ | |
{ | |
u16 status_1kbt = 0; | |
u16 receive_errors = 0; | |
- bool phy_hung = false; | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
- /* | |
- * Read PHY Receive Error counter first, if its is max - all F's then | |
+ /* Read PHY Receive Error counter first, if its is max - all F's then | |
* read the Base1000T status register If both are max then PHY is hung. | |
*/ | |
ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); | |
- | |
if (ret_val) | |
- goto out; | |
- if (receive_errors == E1000_RECEIVE_ERROR_MAX) { | |
+ return false; | |
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) { | |
ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); | |
if (ret_val) | |
- goto out; | |
+ return false; | |
if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == | |
E1000_IDLE_ERROR_COUNT_MASK) | |
- phy_hung = true; | |
+ return true; | |
} | |
-out: | |
- return phy_hung; | |
+ | |
+ return false; | |
} | |
/** | |
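
The reworked PHY hang check above replaces the phy_hung flag and the goto out exit with straight early returns. A compilable sketch of the same decision flow, with a stubbed register read standing in for e1e_rphy() and illustrative counter/mask values (both values and the stub are assumptions, not the driver's defines):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RECEIVE_ERROR_MAX      0xFFFF   /* illustrative: "all F's" */
#define IDLE_ERROR_COUNT_MASK  0x00FF   /* illustrative mask */

/* Stub standing in for e1e_rphy(); returns 0 on success. */
static int read_phy(uint32_t reg, uint16_t *data)
{
	*data = (reg == 0) ? 0xFFFF : 0x00FF;   /* pretend both counters are saturated */
	return 0;
}

/* Same shape as the reworked check: any read failure or a non-saturated
 * counter means "not hung"; only the fully saturated path returns true.
 */
static bool phy_is_hung(void)
{
	uint16_t receive_errors, status_1kbt;

	if (read_phy(0, &receive_errors))
		return false;
	if (receive_errors == RECEIVE_ERROR_MAX) {
		if (read_phy(1, &status_1kbt))
			return false;
		if ((status_1kbt & IDLE_ERROR_COUNT_MASK) == IDLE_ERROR_COUNT_MASK)
			return true;
	}
	return false;
}

int main(void)
{
	printf("phy hung: %s\n", phy_is_hung() ? "yes" : "no");
	return 0;
}
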
@@ -1453,8 +1419,7 @@ | |
**/ | |
static s32 e1000_setup_link_82571(struct e1000_hw *hw) | |
{ | |
- /* | |
- * 82573 does not have a word in the NVM to determine | |
+ /* 82573 does not have a word in the NVM to determine | |
* the default flow control setting, so we explicitly | |
* set it to full. | |
*/ | |
@@ -1469,7 +1434,7 @@ | |
break; | |
} | |
- return e1000e_setup_link(hw); | |
+ return e1000e_setup_link_generic(hw); | |
} | |
/** | |
@@ -1506,9 +1471,7 @@ | |
if (ret_val) | |
return ret_val; | |
- ret_val = e1000e_setup_copper_link(hw); | |
- | |
- return ret_val; | |
+ return e1000e_setup_copper_link(hw); | |
} | |
/** | |
@@ -1523,8 +1486,7 @@ | |
switch (hw->mac.type) { | |
case e1000_82571: | |
case e1000_82572: | |
- /* | |
- * If SerDes loopback mode is entered, there is no form | |
+ /* If SerDes loopback mode is entered, there is no form | |
* of reset to take the adapter out of that mode. So we | |
* have to explicitly take the adapter out of loopback | |
* mode. This prevents drivers from twiddling their thumbs | |
@@ -1570,16 +1532,17 @@ | |
ctrl = er32(CTRL); | |
status = er32(STATUS); | |
+ er32(RXCW); | |
+ /* SYNCH bit and IV bit are sticky */ | |
+ usleep_range(10, 20); | |
rxcw = er32(RXCW); | |
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { | |
- | |
/* Receiver is synchronized with no invalid bits. */ | |
switch (mac->serdes_link_state) { | |
case e1000_serdes_link_autoneg_complete: | |
if (!(status & E1000_STATUS_LU)) { | |
- /* | |
- * We have lost link, retry autoneg before | |
+ /* We have lost link, retry autoneg before | |
* reporting link failure | |
*/ | |
mac->serdes_link_state = | |
@@ -1592,15 +1555,12 @@ | |
break; | |
case e1000_serdes_link_forced_up: | |
- /* | |
- * If we are receiving /C/ ordered sets, re-enable | |
+ /* If we are receiving /C/ ordered sets, re-enable | |
* auto-negotiation in the TXCW register and disable | |
* forced link in the Device Control register in an | |
* attempt to auto-negotiate with our link partner. | |
- * If the partner code word is null, stop forcing | |
- * and restart auto negotiation. | |
*/ | |
- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { | |
+ if (rxcw & E1000_RXCW_C) { | |
/* Enable autoneg, and unforce link up */ | |
ew32(TXCW, mac->txcw); | |
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | |
@@ -1615,8 +1575,7 @@ | |
case e1000_serdes_link_autoneg_progress: | |
if (rxcw & E1000_RXCW_C) { | |
- /* | |
- * We received /C/ ordered sets, meaning the | |
+ /* We received /C/ ordered sets, meaning the | |
* link partner has autonegotiated, and we can | |
* trust the Link Up (LU) status bit. | |
*/ | |
@@ -1632,8 +1591,7 @@ | |
e_dbg("AN_PROG -> DOWN\n"); | |
} | |
} else { | |
- /* | |
- * The link partner did not autoneg. | |
+ /* The link partner did not autoneg. | |
* Force link up and full duplex, and change | |
* state to forced. | |
*/ | |
@@ -1656,8 +1614,7 @@ | |
case e1000_serdes_link_down: | |
default: | |
- /* | |
- * The link was down but the receiver has now gained | |
+ /* The link was down but the receiver has now gained | |
* valid sync, so lets see if we can bring the link | |
* up. | |
*/ | |
@@ -1675,17 +1632,18 @@ | |
mac->serdes_link_state = e1000_serdes_link_down; | |
e_dbg("ANYSTATE -> DOWN\n"); | |
} else { | |
- /* | |
- * Check several times, if Sync and Config | |
- * both are consistently 1 then simply ignore | |
- * the Invalid bit and restart Autoneg | |
+ /* Check several times, if SYNCH bit and CONFIG | |
+ * bit both are consistently 1 then simply ignore | |
+ * the IV bit and restart Autoneg | |
*/ | |
for (i = 0; i < AN_RETRY_COUNT; i++) { | |
- udelay(10); | |
+ usleep_range(10, 20); | |
rxcw = er32(RXCW); | |
- if ((rxcw & E1000_RXCW_IV) && | |
- !((rxcw & E1000_RXCW_SYNCH) && | |
- (rxcw & E1000_RXCW_C))) { | |
+ if ((rxcw & E1000_RXCW_SYNCH) && | |
+ (rxcw & E1000_RXCW_C)) | |
+ continue; | |
+ | |
+ if (rxcw & E1000_RXCW_IV) { | |
mac->serdes_has_link = false; | |
mac->serdes_link_state = | |
e1000_serdes_link_down; | |
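
The retry loop added in this hunk only declares the serdes link down when the invalid bit shows up without SYNCH and /C/ being set on the same read; the sticky bits are re-read a few times first. A standalone sketch of that decision order (RXCW_C and RXCW_SYNCH match the values shown in defines.h later in this diff; the IV bit position, the retry count, and the register stub are assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXCW_IV        (1u << 27)   /* assumed bit position */
#define RXCW_C         (1u << 29)   /* matches E1000_RXCW_C */
#define RXCW_SYNCH     (1u << 30)   /* matches E1000_RXCW_SYNCH */
#define AN_RETRY_COUNT 5            /* assumed */

/* Stub for er32(RXCW): always reports SYNCH + /C/ with the sticky IV bit set. */
static uint32_t read_rxcw(void)
{
	return RXCW_SYNCH | RXCW_C | RXCW_IV;
}

int main(void)
{
	bool link_down = false;
	int i;

	for (i = 0; i < AN_RETRY_COUNT; i++) {
		/* the driver sleeps with usleep_range(10, 20) here */
		uint32_t rxcw = read_rxcw();

		/* SYNCH and /C/ both set: ignore the sticky IV bit, keep trying */
		if ((rxcw & RXCW_SYNCH) && (rxcw & RXCW_C))
			continue;

		/* IV without a clean SYNCH + /C/ pair: give up on the link */
		if (rxcw & RXCW_IV) {
			link_down = true;
			break;
		}
	}
	printf("serdes link %s\n", link_down ? "down" : "still negotiating");
	return 0;
}
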
@@ -1774,14 +1732,14 @@ | |
/* If workaround is activated... */ | |
if (state) | |
- /* | |
- * Hold a copy of the LAA in RAR[14] This is done so that | |
+ /* Hold a copy of the LAA in RAR[14] This is done so that | |
* between the time RAR[0] gets clobbered and the time it | |
* gets fixed, the actual LAA is in one of the RARs and no | |
* incoming packets directed to this port are dropped. | |
* Eventually the LAA will be in RAR[0] and RAR[14]. | |
*/ | |
- e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); | |
+ hw->mac.ops.rar_set(hw, hw->mac.addr, | |
+ hw->mac.rar_entry_count - 1); | |
} | |
/** | |
@@ -1803,8 +1761,7 @@ | |
if (nvm->type != e1000_nvm_flash_hw) | |
return 0; | |
- /* | |
- * Check bit 4 of word 10h. If it is 0, firmware is done updating | |
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating | |
* 10h-12h. Checksum may need to be fixed. | |
*/ | |
ret_val = e1000_read_nvm(hw, 0x10, 1, &data); | |
@@ -1812,8 +1769,7 @@ | |
return ret_val; | |
if (!(data & 0x10)) { | |
- /* | |
- * Read 0x23 and check bit 15. This bit is a 1 | |
+ /* Read 0x23 and check bit 15. This bit is a 1 | |
* when the checksum has already been fixed. If | |
* the checksum is still wrong and this bit is a | |
* 1, we need to return bad checksum. Otherwise, | |
@@ -1830,6 +1786,8 @@ | |
if (ret_val) | |
return ret_val; | |
ret_val = e1000e_update_nvm_checksum(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
} | |
} | |
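
The comment in the hunks above describes a small state check: bit 4 of NVM word 0x10 gates whether firmware is still updating words 10h-12h, and bit 15 of word 0x23 records that the checksum was already fixed. A userspace sketch with an in-memory NVM image (the helper names and the toy image are hypothetical) walks the same branches:

#include <stdint.h>
#include <stdio.h>

static uint16_t nvm[0x40];   /* toy in-memory NVM image */

static int read_nvm(uint16_t off, uint16_t *data)  { *data = nvm[off]; return 0; }
static int write_nvm(uint16_t off, uint16_t data)  { nvm[off] = data;  return 0; }

/* Mirrors the described flow: only touch word 0x23 when firmware is done
 * (bit 4 of word 0x10 clear) and the "already fixed" bit (15) is not set.
 */
static int fix_checksum_if_needed(void)
{
	uint16_t data;

	if (read_nvm(0x10, &data))
		return -1;
	if (data & 0x0010)            /* firmware still updating 10h-12h */
		return 0;

	if (read_nvm(0x23, &data))
		return -1;
	if (!(data & 0x8000)) {       /* checksum not flagged as fixed yet */
		data |= 0x8000;
		if (write_nvm(0x23, data))
			return -1;
		printf("marked fixed; the driver would now update the checksum\n");
	}
	return 0;
}

int main(void)
{
	nvm[0x10] = 0x0000;           /* firmware done */
	nvm[0x23] = 0x0000;           /* not yet fixed */
	return fix_checksum_if_needed();
}
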
@@ -1842,23 +1800,19 @@ | |
**/ | |
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) | |
{ | |
- s32 ret_val = 0; | |
- | |
if (hw->mac.type == e1000_82571) { | |
- /* | |
- * If there's an alternate MAC address place it in RAR0 | |
+ s32 ret_val; | |
+ | |
+ /* If there's an alternate MAC address place it in RAR0 | |
* so that it will override the Si installed default perm | |
* address. | |
*/ | |
ret_val = e1000_check_alt_mac_addr_generic(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
- ret_val = e1000_read_mac_addr_generic(hw); | |
- | |
-out: | |
- return ret_val; | |
+ return e1000_read_mac_addr_generic(hw); | |
} | |
/** | |
@@ -1873,7 +1827,7 @@ | |
struct e1000_phy_info *phy = &hw->phy; | |
struct e1000_mac_info *mac = &hw->mac; | |
- if (!(phy->ops.check_reset_block)) | |
+ if (!phy->ops.check_reset_block) | |
return; | |
/* If the management interface is not enabled, then power down */ | |
@@ -1930,7 +1884,7 @@ | |
static const struct e1000_mac_operations e82571_mac_ops = { | |
/* .check_mng_mode: mac type dependent */ | |
/* .check_for_link: media type dependent */ | |
- .id_led_init = e1000e_id_led_init, | |
+ .id_led_init = e1000e_id_led_init_generic, | |
.cleanup_led = e1000e_cleanup_led_generic, | |
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571, | |
.get_bus_info = e1000e_get_bus_info_pcie, | |
@@ -1946,7 +1900,9 @@ | |
.setup_link = e1000_setup_link_82571, | |
/* .setup_physical_interface: media type dependent */ | |
.setup_led = e1000e_setup_led_generic, | |
+ .config_collision_dist = e1000e_config_collision_dist_generic, | |
.read_mac_addr = e1000_read_mac_addr_82571, | |
+ .rar_set = e1000e_rar_set_generic, | |
}; | |
static const struct e1000_phy_operations e82_phy_ops_igp = { | |
@@ -1964,7 +1920,7 @@ | |
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571, | |
.set_d3_lplu_state = e1000e_set_d3_lplu_state, | |
.write_reg = e1000e_write_phy_reg_igp, | |
- .cfg_on_link_up = NULL, | |
+ .cfg_on_link_up = NULL, | |
}; | |
static const struct e1000_phy_operations e82_phy_ops_m88 = { | |
@@ -1973,7 +1929,7 @@ | |
.check_reset_block = e1000e_check_reset_block_generic, | |
.commit = e1000e_phy_sw_reset, | |
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88, | |
- .get_cfg_done = e1000e_get_cfg_done, | |
+ .get_cfg_done = e1000e_get_cfg_done_generic, | |
.get_cable_length = e1000e_get_cable_length_m88, | |
.get_info = e1000e_get_phy_info_m88, | |
.read_reg = e1000e_read_phy_reg_m88, | |
@@ -1982,7 +1938,7 @@ | |
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571, | |
.set_d3_lplu_state = e1000e_set_d3_lplu_state, | |
.write_reg = e1000e_write_phy_reg_m88, | |
- .cfg_on_link_up = NULL, | |
+ .cfg_on_link_up = NULL, | |
}; | |
static const struct e1000_phy_operations e82_phy_ops_bm = { | |
@@ -1991,7 +1947,7 @@ | |
.check_reset_block = e1000e_check_reset_block_generic, | |
.commit = e1000e_phy_sw_reset, | |
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88, | |
- .get_cfg_done = e1000e_get_cfg_done, | |
+ .get_cfg_done = e1000e_get_cfg_done_generic, | |
.get_cable_length = e1000e_get_cable_length_m88, | |
.get_info = e1000e_get_phy_info_m88, | |
.read_reg = e1000e_read_phy_reg_bm2, | |
@@ -2000,13 +1956,14 @@ | |
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571, | |
.set_d3_lplu_state = e1000e_set_d3_lplu_state, | |
.write_reg = e1000e_write_phy_reg_bm2, | |
- .cfg_on_link_up = NULL, | |
+ .cfg_on_link_up = NULL, | |
}; | |
static const struct e1000_nvm_operations e82571_nvm_ops = { | |
.acquire = e1000_acquire_nvm_82571, | |
.read = e1000e_read_nvm_eerd, | |
.release = e1000_release_nvm_82571, | |
+ .reload = e1000e_reload_nvm_generic, | |
.update = e1000_update_nvm_checksum_82571, | |
.valid_led_default = e1000_valid_led_default_82571, | |
.validate = e1000_validate_nvm_checksum_82571, | |
@@ -2076,13 +2033,16 @@ | |
| FLAG_HAS_MSIX | |
| FLAG_HAS_JUMBO_FRAMES | |
| FLAG_HAS_WOL | |
+ | FLAG_HAS_HW_TIMESTAMP | |
| FLAG_APME_IN_CTRL3 | |
| FLAG_HAS_SMART_POWER_DOWN | |
| FLAG_HAS_AMT | |
| FLAG_HAS_CTRLEXT_ON_LOAD, | |
- .flags2 = FLAG2_CHECK_PHY_HANG | |
+ .flags2 = FLAG2_CHECK_PHY_HANG | |
| FLAG2_DISABLE_ASPM_L0S | |
- | FLAG2_NO_DISABLE_RX, | |
+ | FLAG2_DISABLE_ASPM_L1 | |
+ | FLAG2_NO_DISABLE_RX | |
+ | FLAG2_DMA_BURST, | |
.pba = 32, | |
.max_hw_frame_size = DEFAULT_JUMBO, | |
.get_variants = e1000_get_variants_82571, | |
@@ -2095,12 +2055,14 @@ | |
.mac = e1000_82583, | |
.flags = FLAG_HAS_HW_VLAN_FILTER | |
| FLAG_HAS_WOL | |
+ | FLAG_HAS_HW_TIMESTAMP | |
| FLAG_APME_IN_CTRL3 | |
| FLAG_HAS_SMART_POWER_DOWN | |
| FLAG_HAS_AMT | |
| FLAG_HAS_JUMBO_FRAMES | |
| FLAG_HAS_CTRLEXT_ON_LOAD, | |
.flags2 = FLAG2_DISABLE_ASPM_L0S | |
+ | FLAG2_DISABLE_ASPM_L1 | |
| FLAG2_NO_DISABLE_RX, | |
.pba = 32, | |
.max_hw_frame_size = DEFAULT_JUMBO, | |
@@ -2109,4 +2071,3 @@ | |
.phy_ops = &e82_phy_ops_bm, | |
.nvm_ops = &e82571_nvm_ops, | |
}; | |
- | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: 82571.h | |
diff -ru e1000e/defines.h /home/arch/linux/drivers/net/ethernet/intel/e1000e/defines.h | |
--- e1000e/defines.h 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/defines.h 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -29,25 +29,6 @@ | |
#ifndef _E1000_DEFINES_H_ | |
#define _E1000_DEFINES_H_ | |
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | |
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | |
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ | |
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | |
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ | |
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ | |
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ | |
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | |
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ | |
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ | |
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | |
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ | |
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ | |
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ | |
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ | |
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ | |
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ | |
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ | |
- | |
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ | |
#define REQ_TX_DESCRIPTOR_MULTIPLE 8 | |
#define REQ_RX_DESCRIPTOR_MULTIPLE 8 | |
@@ -74,7 +55,9 @@ | |
#define E1000_WUS_BC E1000_WUFC_BC | |
/* Extended Device Control */ | |
+#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ | |
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ | |
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ | |
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ | |
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ | |
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ | |
@@ -83,8 +66,7 @@ | |
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 | |
#define E1000_CTRL_EXT_EIAME 0x01000000 | |
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ | |
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | |
-#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | |
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ | |
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ | |
#define E1000_CTRL_EXT_LSECCK 0x00001000 | |
#define E1000_CTRL_EXT_PHYPDEN 0x00100000 | |
@@ -101,9 +83,11 @@ | |
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ | |
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ | |
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ | |
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ | |
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ | |
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | |
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ | |
#define E1000_RXDEXT_STATERR_CE 0x01000000 | |
#define E1000_RXDEXT_STATERR_SE 0x02000000 | |
#define E1000_RXDEXT_STATERR_SEQ 0x04000000 | |
@@ -112,19 +96,26 @@ | |
/* mask to determine if packets should be dropped due to frame errors */ | |
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ | |
- E1000_RXD_ERR_CE | \ | |
- E1000_RXD_ERR_SE | \ | |
- E1000_RXD_ERR_SEQ | \ | |
- E1000_RXD_ERR_CXE | \ | |
- E1000_RXD_ERR_RXE) | |
+ E1000_RXD_ERR_CE | \ | |
+ E1000_RXD_ERR_SE | \ | |
+ E1000_RXD_ERR_SEQ | \ | |
+ E1000_RXD_ERR_CXE | \ | |
+ E1000_RXD_ERR_RXE) | |
/* Same mask, but for extended and packet split descriptors */ | |
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ | |
- E1000_RXDEXT_STATERR_CE | \ | |
- E1000_RXDEXT_STATERR_SE | \ | |
- E1000_RXDEXT_STATERR_SEQ | \ | |
- E1000_RXDEXT_STATERR_CXE | \ | |
- E1000_RXDEXT_STATERR_RXE) | |
+ E1000_RXDEXT_STATERR_CE | \ | |
+ E1000_RXDEXT_STATERR_SE | \ | |
+ E1000_RXDEXT_STATERR_SEQ | \ | |
+ E1000_RXDEXT_STATERR_CXE | \ | |
+ E1000_RXDEXT_STATERR_RXE) | |
+ | |
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 | |
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 | |
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 | |
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 | |
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 | |
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 | |
#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 | |
@@ -170,12 +161,12 @@ | |
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ | |
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ | |
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ | |
+#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ | |
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ | |
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ | |
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ | |
-/* | |
- * Use byte values for the following shift parameters | |
+/* Use byte values for the following shift parameters | |
* Usage: | |
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & | |
* E1000_PSRCTL_BSIZE0_MASK) | | |
@@ -222,8 +213,11 @@ | |
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ | |
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ | |
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ | |
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ | |
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | |
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | |
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ | |
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ | |
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ | |
#define E1000_CTRL_RST 0x04000000 /* Global reset */ | |
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ | |
@@ -231,10 +225,9 @@ | |
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ | |
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ | |
-/* | |
- * Bit definitions for the Management Data IO (MDIO) and Management Data | |
- * Clock (MDC) pins in the Device Control Register. | |
- */ | |
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 | |
+ | |
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 | |
/* Device Status */ | |
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | |
@@ -243,19 +236,17 @@ | |
#define E1000_STATUS_FUNC_SHIFT 2 | |
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ | |
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ | |
+#define E1000_STATUS_SPEED_MASK 0x000000C0 | |
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | |
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | |
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | |
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ | |
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ | |
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ | |
- | |
-/* Constants used to interpret the masked PCI-X bus speed. */ | |
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ | |
#define HALF_DUPLEX 1 | |
#define FULL_DUPLEX 2 | |
- | |
#define ADVERTISE_10_HALF 0x0001 | |
#define ADVERTISE_10_FULL 0x0002 | |
#define ADVERTISE_100_HALF 0x0004 | |
@@ -264,14 +255,15 @@ | |
#define ADVERTISE_1000_FULL 0x0020 | |
/* 1000/H is not supported, nor spec-compliant. */ | |
-#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ | |
- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ | |
- ADVERTISE_1000_FULL) | |
-#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ | |
- ADVERTISE_100_HALF | ADVERTISE_100_FULL) | |
-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) | |
-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) | |
-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) | |
+#define E1000_ALL_SPEED_DUPLEX ( \ | |
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ | |
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL) | |
+#define E1000_ALL_NOT_GIG ( \ | |
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ | |
+ ADVERTISE_100_FULL) | |
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) | |
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) | |
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) | |
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX | |
@@ -309,6 +301,7 @@ | |
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ | |
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ | |
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ | |
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ | |
/* Transmit Control */ | |
#define E1000_TCTL_EN 0x00000002 /* enable Tx */ | |
@@ -318,14 +311,14 @@ | |
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ | |
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ | |
-/* Transmit Arbitration Count */ | |
- | |
/* SerDes Control */ | |
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 | |
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 | |
/* Receive Checksum Control */ | |
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ | |
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ | |
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ | |
/* Header split receive */ | |
#define E1000_RFCTL_NFSW_DIS 0x00000040 | |
@@ -373,12 +366,23 @@ | |
#define E1000_KABGTXD_BGSQLBIAS 0x00050000 | |
+/* Low Power IDLE Control */ | |
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ | |
+ | |
/* PBA constants */ | |
#define E1000_PBA_8K 0x0008 /* 8KB */ | |
#define E1000_PBA_16K 0x0010 /* 16KB */ | |
+#define E1000_PBA_RXA_MASK 0xFFFF | |
+ | |
#define E1000_PBS_16K E1000_PBA_16K | |
+/* Uncorrectable/correctable ECC Error counts and enable bits */ | |
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF | |
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 | |
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 | |
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 | |
+ | |
#define IFS_MAX 80 | |
#define IFS_MIN 40 | |
#define IFS_RATIO 4 | |
@@ -398,7 +402,9 @@ | |
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ | |
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ | |
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ | |
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ | |
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ | |
+/* If this bit asserted, the driver should claim the interrupt */ | |
+#define E1000_ICR_INT_ASSERTED 0x80000000 | |
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ | |
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ | |
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ | |
@@ -412,8 +418,7 @@ | |
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ | |
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ | |
-/* | |
- * This defines the bits that are set in the Interrupt Mask | |
+/* This defines the bits that are set in the Interrupt Mask | |
* Set/Read Register. Each bit is documented below: | |
* o RXT0 = Receiver Timer Interrupt (ring 0) | |
* o TXDW = Transmit Descriptor Written Back | |
@@ -422,11 +427,11 @@ | |
* o LSC = Link Status Change | |
*/ | |
#define IMS_ENABLE_MASK ( \ | |
- E1000_IMS_RXT0 | \ | |
- E1000_IMS_TXDW | \ | |
- E1000_IMS_RXDMT0 | \ | |
- E1000_IMS_RXSEQ | \ | |
- E1000_IMS_LSC) | |
+ E1000_IMS_RXT0 | \ | |
+ E1000_IMS_TXDW | \ | |
+ E1000_IMS_RXDMT0 | \ | |
+ E1000_IMS_RXSEQ | \ | |
+ E1000_IMS_LSC) | |
/* Interrupt Mask Set */ | |
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | |
@@ -434,6 +439,7 @@ | |
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ | |
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | |
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ | |
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ | |
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ | |
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ | |
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ | |
@@ -463,8 +469,7 @@ | |
/* 802.1q VLAN Packet Size */ | |
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | |
-/* Receive Address */ | |
-/* | |
+/* Receive Address | |
* Number of high/low register pairs in the RAR. The RAR (Receive Address | |
* Registers) holds the directed and multicast addresses that we monitor. | |
* Technically, we have 16 spots. However, we reserve one of these spots | |
@@ -525,6 +530,28 @@ | |
#define E1000_RXCW_C 0x20000000 /* Receive config */ | |
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ | |
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ | |
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ | |
+ | |
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ | |
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ | |
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 | |
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 | |
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 | |
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 | |
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A | |
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ | |
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ | |
+ | |
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 | |
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 | |
+ | |
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 | |
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 | |
+ | |
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24 | |
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF | |
+ | |
/* PCI Express Control */ | |
#define E1000_GCR_RXD_NO_SNOOP 0x00000001 | |
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 | |
@@ -540,65 +567,6 @@ | |
E1000_GCR_TXDSCW_NO_SNOOP | \ | |
E1000_GCR_TXDSCR_NO_SNOOP) | |
-/* PHY Control Register */ | |
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ | |
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ | |
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */ | |
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ | |
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ | |
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ | |
-#define MII_CR_SPEED_1000 0x0040 | |
-#define MII_CR_SPEED_100 0x2000 | |
-#define MII_CR_SPEED_10 0x0000 | |
- | |
-/* PHY Status Register */ | |
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ | |
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ | |
- | |
-/* Autoneg Advertisement Register */ | |
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ | |
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ | |
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ | |
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ | |
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ | |
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ | |
- | |
-/* Link Partner Ability Register (Base Page) */ | |
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ | |
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ | |
- | |
-/* Autoneg Expansion Register */ | |
-#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ | |
- | |
-/* 1000BASE-T Control Register */ | |
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ | |
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ | |
- /* 0=DTE device */ | |
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ | |
- /* 0=Configure PHY as Slave */ | |
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ | |
- /* 0=Automatic Master/Slave config */ | |
- | |
-/* 1000BASE-T Status Register */ | |
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ | |
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ | |
- | |
- | |
-/* PHY 1000 MII Register/Bit Definitions */ | |
-/* PHY Registers defined by IEEE */ | |
-#define PHY_CONTROL 0x00 /* Control Register */ | |
-#define PHY_STATUS 0x01 /* Status Register */ | |
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ | |
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ | |
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ | |
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ | |
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ | |
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ | |
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ | |
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ | |
- | |
-#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ | |
- | |
/* NVM Control */ | |
#define E1000_EECD_SK 0x00000001 /* NVM Clock */ | |
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ | |
@@ -619,17 +587,21 @@ | |
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ | |
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) | |
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ | |
-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ | |
-#define E1000_NVM_RW_REG_START 1 /* Start operation */ | |
-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ | |
-#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ | |
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ | |
-#define E1000_FLASH_UPDATES 2000 | |
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */ | |
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ | |
+#define E1000_NVM_RW_REG_START 1 /* Start operation */ | |
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ | |
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */ | |
+#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */ | |
+#define E1000_FLASH_UPDATES 2000 | |
/* NVM Word Offsets */ | |
#define NVM_COMPAT 0x0003 | |
#define NVM_ID_LED_SETTINGS 0x0004 | |
+#define NVM_FUTURE_INIT_WORD1 0x0019 | |
+#define NVM_COMPAT_VALID_CSUM 0x0001 | |
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 | |
+ | |
#define NVM_INIT_CONTROL2_REG 0x000F | |
#define NVM_INIT_CONTROL3_PORT_B 0x0014 | |
#define NVM_INIT_3GIO_3 0x001A | |
@@ -638,8 +610,6 @@ | |
#define NVM_ALT_MAC_ADDR_PTR 0x0037 | |
#define NVM_CHECKSUM_REG 0x003F | |
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ | |
- | |
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ | |
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ | |
@@ -710,8 +680,7 @@ | |
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ | |
#define MAX_PHY_MULTI_PAGE_REG 0xF | |
-/* Bit definitions for valid PHY IDs. */ | |
-/* | |
+/* Bit definitions for valid PHY IDs. | |
* I = Integrated | |
* E = External | |
*/ | |
@@ -730,6 +699,7 @@ | |
#define I82577_E_PHY_ID 0x01540050 | |
#define I82578_E_PHY_ID 0x004DD040 | |
#define I82579_E_PHY_ID 0x01540090 | |
+#define I217_E_PHY_ID 0x015400A0 | |
/* M88E1000 Specific Registers */ | |
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ | |
@@ -748,10 +718,6 @@ | |
#define M88E1000_PSCR_AUTO_X_1000T 0x0040 | |
/* Auto crossover enabled all speeds */ | |
#define M88E1000_PSCR_AUTO_X_MODE 0x0060 | |
-/* | |
- * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) | |
- * 0=Normal 10BASE-T Rx Threshold | |
- */ | |
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ | |
/* M88E1000 PHY Specific Status Register */ | |
@@ -765,14 +731,12 @@ | |
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 | |
-/* | |
- * Number of times we will attempt to autonegotiate before downshifting if we | |
+/* Number of times we will attempt to autonegotiate before downshifting if we | |
* are the master | |
*/ | |
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 | |
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 | |
-/* | |
- * Number of times we will attempt to autonegotiate before downshifting if we | |
+/* Number of times we will attempt to autonegotiate before downshifting if we | |
* are the slave | |
*/ | |
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 | |
@@ -789,13 +753,7 @@ | |
/* BME1000 PHY Specific Control Register */ | |
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ | |
- | |
-#define PHY_PAGE_SHIFT 5 | |
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ | |
- ((reg) & MAX_PHY_REG_ADDRESS)) | |
- | |
-/* | |
- * Bits... | |
+/* Bits... | |
* 15-5: page | |
* 4-0: register offset | |
*/ | |
@@ -831,6 +789,7 @@ | |
GG82563_REG(194, 18) /* Inband Control */ | |
/* MDI Control */ | |
+#define E1000_MDIC_REG_MASK 0x001F0000 | |
#define E1000_MDIC_REG_SHIFT 16 | |
#define E1000_MDIC_PHY_SHIFT 21 | |
#define E1000_MDIC_OP_WRITE 0x04000000 | |
diff -ru e1000e/e1000.h /home/arch/linux/drivers/net/ethernet/intel/e1000e/e1000.h | |
--- e1000e/e1000.h 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/e1000.h 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -41,7 +41,12 @@ | |
#include <linux/pci-aspm.h> | |
#include <linux/crc32.h> | |
#include <linux/if_vlan.h> | |
- | |
+#include <linux/clocksource.h> | |
+#include <linux/net_tstamp.h> | |
+#include <linux/ptp_clock_kernel.h> | |
+#include <linux/ptp_classify.h> | |
+#include <linux/mii.h> | |
+#include <linux/mdio.h> | |
#include "hw.h" | |
struct e1000_info; | |
@@ -57,7 +62,6 @@ | |
#define e_notice(format, arg...) \ | |
netdev_notice(adapter->netdev, format, ## arg) | |
- | |
/* Interrupt modes, as used by the IntMode parameter */ | |
#define E1000E_INT_MODE_LEGACY 0 | |
#define E1000E_INT_MODE_MSI 1 | |
@@ -75,9 +79,6 @@ | |
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ | |
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ | |
-/* Early Receive defines */ | |
-#define E1000_ERT_2048 0x100 | |
- | |
#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ | |
/* How many Tx Descriptors do we need to call netif_wake_queue ? */ | |
@@ -89,93 +90,30 @@ | |
#define E1000_MNG_VLAN_NONE (-1) | |
-/* Number of packet split data buffers (not including the header buffer) */ | |
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) | |
- | |
#define DEFAULT_JUMBO 9234 | |
-/* BM/HV Specific Registers */ | |
-#define BM_PORT_CTRL_PAGE 769 | |
- | |
-#define PHY_UPPER_SHIFT 21 | |
-#define BM_PHY_REG(page, reg) \ | |
- (((reg) & MAX_PHY_REG_ADDRESS) |\ | |
- (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ | |
- (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) | |
- | |
-/* PHY Wakeup Registers and defines */ | |
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) | |
-#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) | |
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) | |
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) | |
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) | |
-#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) | |
-#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) | |
-#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) | |
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) | |
-#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) | |
- | |
-#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ | |
-#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ | |
-#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ | |
-#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ | |
-#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ | |
-#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ | |
-#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ | |
- | |
-#define HV_STATS_PAGE 778 | |
-#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ | |
-#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) | |
-#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ | |
-#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) | |
-#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */ | |
-#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) | |
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ | |
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) | |
-#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ | |
-#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) | |
-#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ | |
-#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) | |
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ | |
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) | |
- | |
-#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ | |
- | |
-/* BM PHY Copper Specific Status */ | |
-#define BM_CS_STATUS 17 | |
-#define BM_CS_STATUS_LINK_UP 0x0400 | |
-#define BM_CS_STATUS_RESOLVED 0x0800 | |
-#define BM_CS_STATUS_SPEED_MASK 0xC000 | |
-#define BM_CS_STATUS_SPEED_1000 0x8000 | |
- | |
-/* 82577 Mobile Phy Status Register */ | |
-#define HV_M_STATUS 26 | |
-#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 | |
-#define HV_M_STATUS_SPEED_MASK 0x0300 | |
-#define HV_M_STATUS_SPEED_1000 0x0200 | |
-#define HV_M_STATUS_LINK_UP 0x0040 | |
- | |
-#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ | |
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 | |
- | |
/* Time to wait before putting the device into D3 if there's no link (in ms). */ | |
#define LINK_TIMEOUT 100 | |
+/* Count for polling __E1000_RESET condition every 10-20msec. | |
+ * Experimentation has shown the reset can take approximately 210msec. | |
+ */ | |
+#define E1000_CHECK_RESET_COUNT 25 | |
+ | |
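
E1000_CHECK_RESET_COUNT pairs with a 10-20 ms poll, so 25 iterations comfortably cover the ~210 ms reset mentioned in the comment. A hedged userspace sketch of that polling shape (the reset_in_progress() stub and the fixed 15 ms sleep are stand-ins, not the driver's code):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define CHECK_RESET_COUNT 25   /* 25 polls x 10-20 ms covers ~210 ms plus margin */

/* Stand-in for testing the adapter's reset flag. */
static bool reset_in_progress(int attempt)
{
	return attempt < 12;        /* pretend the reset clears after ~12 polls */
}

int main(void)
{
	int count = CHECK_RESET_COUNT;
	int attempt = 0;

	while (reset_in_progress(attempt) && count--) {
		usleep(15000);      /* the driver uses usleep_range(10000, 20000) */
		attempt++;
	}

	printf("reset %s after %d polls\n",
	       count < 0 ? "timed out" : "cleared", attempt);
	return 0;
}
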
#define DEFAULT_RDTR 0 | |
#define DEFAULT_RADV 8 | |
#define BURST_RDTR 0x20 | |
#define BURST_RADV 0x20 | |
-/* | |
- * in the case of WTHRESH, it appears at least the 82571/2 hardware | |
+/* in the case of WTHRESH, it appears at least the 82571/2 hardware | |
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when | |
- * WTHRESH=4, and since we want 64 bytes at a time written back, set | |
- * it to 5 | |
+ * WTHRESH=4, so a setting of 5 gives the most efficient bus | |
+ * utilization but to avoid possible Tx stalls, set it to 1 | |
*/ | |
#define E1000_TXDCTL_DMA_BURST_ENABLE \ | |
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ | |
E1000_TXDCTL_COUNT_DESC | \ | |
- (5 << 16) | /* wthresh must be +1 more than desired */\ | |
+ (1 << 16) | /* wthresh must be +1 more than desired */\ | |
(1 << 8) | /* hthresh */ \ | |
0x1f) /* pthresh */ | |
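
The DMA-burst macro above packs three thresholds into TXDCTL: pthresh in the low bits, hthresh at bit 8 and wthresh at bit 16, with wthresh now set to 1 instead of 5 to avoid Tx stalls. A small sketch that rebuilds the register value (the GRAN and COUNT_DESC bit positions are assumptions; only the field shifts come from the macro):

#include <stdio.h>
#include <stdint.h>

#define TXDCTL_GRAN        (1u << 24)   /* assumed flag position */
#define TXDCTL_COUNT_DESC  (1u << 22)   /* assumed flag position */

static uint32_t txdctl_dma_burst(uint32_t pthresh, uint32_t hthresh, uint32_t wthresh)
{
	return TXDCTL_GRAN | TXDCTL_COUNT_DESC |
	       (wthresh << 16) | (hthresh << 8) | pthresh;
}

int main(void)
{
	/* old tuning: wthresh 5; new tuning: wthresh 1 ("+1 more than desired") */
	printf("old TXDCTL 0x%08x\n", txdctl_dma_burst(0x1f, 1, 5));
	printf("new TXDCTL 0x%08x\n", txdctl_dma_burst(0x1f, 1, 1));
	return 0;
}
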
@@ -200,6 +138,7 @@ | |
board_ich10lan, | |
board_pchlan, | |
board_pch2lan, | |
+ board_pch_lpt, | |
}; | |
struct e1000_ps_page { | |
@@ -207,8 +146,7 @@ | |
u64 dma; /* must be u64 - written to hw */ | |
}; | |
-/* | |
- * wrappers around a pointer to a socket buffer, | |
+/* wrappers around a pointer to a socket buffer, | |
* so a DMA handle can be stored along with the buffer | |
*/ | |
struct e1000_buffer { | |
@@ -234,6 +172,7 @@ | |
}; | |
struct e1000_ring { | |
+ struct e1000_adapter *adapter; /* back pointer to adapter */ | |
void *desc; /* pointer to ring memory */ | |
dma_addr_t dma; /* phys address of ring */ | |
unsigned int size; /* length of ring in bytes */ | |
@@ -242,8 +181,8 @@ | |
u16 next_to_use; | |
u16 next_to_clean; | |
- u16 head; | |
- u16 tail; | |
+ void __iomem *head; | |
+ void __iomem *tail; | |
/* array of buffer information structs */ | |
struct e1000_buffer *buffer_info; | |
@@ -251,7 +190,7 @@ | |
char name[IFNAMSIZ + 5]; | |
u32 ims_val; | |
u32 itr_val; | |
- u16 itr_register; | |
+ void __iomem *itr_register; | |
int set_itr; | |
struct sk_buff *rx_skb_top; | |
@@ -297,14 +236,14 @@ | |
u16 tx_itr; | |
u16 rx_itr; | |
- /* | |
- * Tx | |
- */ | |
- struct e1000_ring *tx_ring /* One per active queue */ | |
- ____cacheline_aligned_in_smp; | |
+ /* Tx - one ring per active queue */ | |
+ struct e1000_ring *tx_ring ____cacheline_aligned_in_smp; | |
+ u32 tx_fifo_limit; | |
struct napi_struct napi; | |
+ unsigned int uncorr_errors; /* uncorrectable ECC errors */ | |
+ unsigned int corr_errors; /* correctable ECC errors */ | |
unsigned int restart_queue; | |
u32 txd_cmd; | |
@@ -331,14 +270,11 @@ | |
u32 tx_fifo_size; | |
u32 tx_dma_failed; | |
- /* | |
- * Rx | |
- */ | |
- bool (*clean_rx) (struct e1000_adapter *adapter, | |
- int *work_done, int work_to_do) | |
- ____cacheline_aligned_in_smp; | |
- void (*alloc_rx_buf) (struct e1000_adapter *adapter, | |
- int cleaned_count, gfp_t gfp); | |
+ /* Rx */ | |
+ bool (*clean_rx) (struct e1000_ring *ring, int *work_done, | |
+ int work_to_do) ____cacheline_aligned_in_smp; | |
+ void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count, | |
+ gfp_t gfp); | |
struct e1000_ring *rx_ring; | |
u32 rx_int_delay; | |
@@ -352,6 +288,7 @@ | |
u64 gorc_old; | |
u32 alloc_rx_buff_failed; | |
u32 rx_dma_failed; | |
+ u32 rx_hwtstamp_cleared; | |
unsigned int rx_ps_pages; | |
u16 rx_ps_bsize0; | |
@@ -365,7 +302,7 @@ | |
/* structs defined in e1000_hw.h */ | |
struct e1000_hw hw; | |
- spinlock_t stats64_lock; | |
+ spinlock_t stats64_lock; /* protects statistics counters */ | |
struct e1000_hw_stats stats; | |
struct e1000_phy_info phy_info; | |
struct e1000_phy_stats phy_stats; | |
@@ -398,6 +335,21 @@ | |
bool idle_check; | |
int phy_hang_count; | |
+ | |
+ u16 tx_ring_count; | |
+ u16 rx_ring_count; | |
+ | |
+ struct hwtstamp_config hwtstamp_config; | |
+ struct delayed_work systim_overflow_work; | |
+ struct sk_buff *tx_hwtstamp_skb; | |
+ struct work_struct tx_hwtstamp_work; | |
+ spinlock_t systim_lock; /* protects SYSTIML/H regsters */ | |
+ struct cyclecounter cc; | |
+ struct timecounter tc; | |
+ struct ptp_clock *ptp_clock; | |
+ struct ptp_clock_info ptp_clock_info; | |
+ | |
+ u16 eee_advert; | |
}; | |
struct e1000_info { | |
@@ -412,12 +364,46 @@ | |
const struct e1000_nvm_operations *nvm_ops; | |
}; | |
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); | |
+ | |
+/* The system time is maintained by a 64-bit counter comprised of the 32-bit | |
+ * SYSTIMH and SYSTIML registers. How the counter increments (and therefore | |
+ * its resolution) is based on the contents of the TIMINCA register - it | |
+ * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0). | |
+ * For the best accuracy, the incperiod should be as small as possible. The | |
+ * incvalue is scaled by a factor as large as possible (while still fitting | |
+ * in bits 23:0) so that relatively small clock corrections can be made. | |
+ * | |
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of | |
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) | |
+ * bits to count nanoseconds leaving the rest for fractional nonseconds. | |
+ */ | |
+#define INCVALUE_96MHz 125 | |
+#define INCVALUE_SHIFT_96MHz 17 | |
+#define INCPERIOD_SHIFT_96MHz 2 | |
+#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz) | |
+ | |
+#define INCVALUE_25MHz 40 | |
+#define INCVALUE_SHIFT_25MHz 18 | |
+#define INCPERIOD_25MHz 1 | |
+ | |
+/* Another drawback of scaling the incvalue by a large factor is the | |
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with | |
+ * by simply reading the clock before it overflows. | |
+ * | |
+ * Clock ns bits Overflows after | |
+ * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~ | |
+ * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs | |
+ * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours | |
+ */ | |
+#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) | |
+ | |
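
The TIMINCA comment above fixes incperiod, incvalue and shift per clock and quotes overflow times of 9.77 and 19.55 hours; a throwaway C snippet reproduces those numbers and checks that the scaled incvalue still fits in bits 23:0:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 96 MHz clock: incvalue 125 << 17, incperiod 12 >> 2 = 3 */
	unsigned incvalue_96 = 125u << 17;
	unsigned incperiod_96 = 12 >> 2;

	printf("96MHz incvalue = 0x%06x (fits 24 bits: %s)\n",
	       incvalue_96, incvalue_96 <= 0xFFFFFF ? "yes" : "no");
	printf("96MHz incperiod = %u\n", incperiod_96);

	/* overflow estimates from the comment: 47 and 46 usable ns bits */
	double hours_96 = (double)(1ULL << (47 - 2)) / 1e9 / 3600.0;
	double hours_25 = (double)(1ULL << 46) / 1e9 / 3600.0;
	printf("96MHz overflow ~ %.2f hours\n", hours_96);   /* ~9.77  */
	printf("25MHz overflow ~ %.2f hours\n", hours_25);   /* ~19.55 */
	return 0;
}
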
/* hardware capability, feature, and workaround flags */ | |
#define FLAG_HAS_AMT (1 << 0) | |
#define FLAG_HAS_FLASH (1 << 1) | |
#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) | |
#define FLAG_HAS_WOL (1 << 3) | |
-#define FLAG_HAS_ERT (1 << 4) | |
+/* reserved bit4 */ | |
#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) | |
#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) | |
#define FLAG_HAS_JUMBO_FRAMES (1 << 7) | |
@@ -427,7 +413,7 @@ | |
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) | |
#define FLAG_IS_QUAD_PORT_A (1 << 12) | |
#define FLAG_IS_QUAD_PORT (1 << 13) | |
-#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) | |
+#define FLAG_HAS_HW_TIMESTAMP (1 << 14) | |
#define FLAG_APME_IN_WUC (1 << 15) | |
#define FLAG_APME_IN_CTRL3 (1 << 16) | |
#define FLAG_APME_CHECK_PORT_B (1 << 17) | |
@@ -443,7 +429,7 @@ | |
#define FLAG_MSI_ENABLED (1 << 27) | |
/* reserved (1 << 28) */ | |
#define FLAG_TSO_FORCE (1 << 29) | |
-#define FLAG_RX_RESTART_NOW (1 << 30) | |
+#define FLAG_RESTART_NOW (1 << 30) | |
#define FLAG_MSI_TEST_FAILED (1 << 31) | |
#define FLAG2_CRC_STRIPPING (1 << 0) | |
@@ -458,6 +444,8 @@ | |
#define FLAG2_CHECK_PHY_HANG (1 << 9) | |
#define FLAG2_NO_DISABLE_RX (1 << 10) | |
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) | |
+#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) | |
+#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) | |
#define E1000_RX_DESC_PS(R, i) \ | |
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | |
@@ -484,30 +472,28 @@ | |
extern char e1000e_driver_name[]; | |
extern const char e1000e_driver_version[]; | |
-extern void e1000e_check_options(struct e1000_adapter *adapter); | |
-extern void e1000e_set_ethtool_ops(struct net_device *netdev); | |
+void e1000e_check_options(struct e1000_adapter *adapter); | |
+void e1000e_set_ethtool_ops(struct net_device *netdev); | |
-extern int e1000e_up(struct e1000_adapter *adapter); | |
-extern void e1000e_down(struct e1000_adapter *adapter); | |
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter); | |
-extern void e1000e_reset(struct e1000_adapter *adapter); | |
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter); | |
-extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); | |
-extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); | |
-extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); | |
-extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); | |
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, | |
- struct rtnl_link_stats64 | |
- *stats); | |
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); | |
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); | |
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter); | |
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter); | |
+int e1000e_up(struct e1000_adapter *adapter); | |
+void e1000e_down(struct e1000_adapter *adapter); | |
+void e1000e_reinit_locked(struct e1000_adapter *adapter); | |
+void e1000e_reset(struct e1000_adapter *adapter); | |
+void e1000e_power_up_phy(struct e1000_adapter *adapter); | |
+int e1000e_setup_rx_resources(struct e1000_ring *ring); | |
+int e1000e_setup_tx_resources(struct e1000_ring *ring); | |
+void e1000e_free_rx_resources(struct e1000_ring *ring); | |
+void e1000e_free_tx_resources(struct e1000_ring *ring); | |
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, | |
+ struct rtnl_link_stats64 *stats); | |
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); | |
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); | |
+void e1000e_get_hw_control(struct e1000_adapter *adapter); | |
+void e1000e_release_hw_control(struct e1000_adapter *adapter); | |
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); | |
extern unsigned int copybreak; | |
-extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); | |
- | |
extern const struct e1000_info e1000_82571_info; | |
extern const struct e1000_info e1000_82572_info; | |
extern const struct e1000_info e1000_82573_info; | |
@@ -518,154 +504,25 @@ | |
extern const struct e1000_info e1000_ich10_info; | |
extern const struct e1000_info e1000_pch_info; | |
extern const struct e1000_info e1000_pch2_info; | |
+extern const struct e1000_info e1000_pch_lpt_info; | |
extern const struct e1000_info e1000_es2_info; | |
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, | |
- u32 pba_num_size); | |
- | |
-extern s32 e1000e_commit_phy(struct e1000_hw *hw); | |
- | |
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); | |
- | |
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); | |
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); | |
- | |
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); | |
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | |
- bool state); | |
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); | |
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); | |
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); | |
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); | |
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); | |
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); | |
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); | |
- | |
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); | |
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); | |
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); | |
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); | |
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); | |
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); | |
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); | |
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); | |
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); | |
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); | |
-extern s32 e1000e_id_led_init(struct e1000_hw *hw); | |
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); | |
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); | |
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); | |
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); | |
-extern s32 e1000e_setup_link(struct e1000_hw *hw); | |
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw); | |
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); | |
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |
- u8 *mc_addr_list, | |
- u32 mc_addr_count); | |
-extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); | |
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); | |
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); | |
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); | |
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); | |
-extern void e1000e_config_collision_dist(struct e1000_hw *hw); | |
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); | |
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); | |
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); | |
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); | |
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); | |
-extern void e1000e_reset_adaptive(struct e1000_hw *hw); | |
-extern void e1000e_update_adaptive(struct e1000_hw *hw); | |
- | |
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); | |
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw); | |
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); | |
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); | |
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); | |
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); | |
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); | |
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, | |
- u16 *data); | |
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); | |
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, | |
- u16 data); | |
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); | |
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); | |
-extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); | |
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); | |
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); | |
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); | |
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); | |
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); | |
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, | |
- u16 *phy_reg); | |
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, | |
- u16 *phy_reg); | |
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); | |
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, | |
- u16 data); | |
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, | |
- u16 *data); | |
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | |
- u32 usec_interval, bool *success); | |
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); | |
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw); | |
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw); | |
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern s32 e1000e_check_downshift(struct e1000_hw *hw); | |
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); | |
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, | |
- u16 *data); | |
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, | |
- u16 *data); | |
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); | |
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, | |
- u16 data); | |
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, | |
- u16 data); | |
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); | |
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); | |
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); | |
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); | |
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); | |
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); | |
- | |
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); | |
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); | |
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); | |
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); | |
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); | |
-extern bool e1000_check_phy_82574(struct e1000_hw *hw); | |
+void e1000e_ptp_init(struct e1000_adapter *adapter); | |
+void e1000e_ptp_remove(struct e1000_adapter *adapter); | |
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) | |
{ | |
return hw->phy.ops.reset(hw); | |
} | |
-static inline s32 e1000_check_reset_block(struct e1000_hw *hw) | |
+static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) | |
{ | |
- return hw->phy.ops.check_reset_block(hw); | |
+ return hw->phy.ops.read_reg(hw, offset, data); | |
} | |
-static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) | |
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) | |
{ | |
- return hw->phy.ops.read_reg(hw, offset, data); | |
+ return hw->phy.ops.read_reg_locked(hw, offset, data); | |
} | |
static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) | |
@@ -673,20 +530,12 @@ | |
return hw->phy.ops.write_reg(hw, offset, data); | |
} | |
-static inline s32 e1000_get_cable_length(struct e1000_hw *hw) | |
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) | |
{ | |
- return hw->phy.ops.get_cable_length(hw); | |
+ return hw->phy.ops.write_reg_locked(hw, offset, data); | |
} | |
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); | |
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | |
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); | |
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); | |
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | |
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); | |
-extern void e1000e_release_nvm(struct e1000_hw *hw); | |
-extern void e1000e_reload_nvm(struct e1000_hw *hw); | |
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); | |
+void e1000e_reload_nvm_generic(struct e1000_hw *hw); | |
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |
{ | |
@@ -706,12 +555,14 @@ | |
return hw->nvm.ops.update(hw); | |
} | |
-static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, | |
+ u16 *data) | |
{ | |
return hw->nvm.ops.read(hw, offset, words, data); | |
} | |
-static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, | |
+ u16 *data) | |
{ | |
return hw->nvm.ops.write(hw, offset, words, data); | |
} | |
@@ -721,23 +572,51 @@ | |
return hw->phy.ops.get_info(hw); | |
} | |
-static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) | |
+static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) | |
{ | |
- return hw->mac.ops.check_mng_mode(hw); | |
+ return readl(hw->hw_addr + reg); | |
} | |
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); | |
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); | |
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); | |
+#define er32(reg) __er32(hw, E1000_##reg) | |
-static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) | |
+/** | |
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts | |
+ * @hw: pointer to the HW structure | |
+ * | |
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could | |
+ * be accessing the registers at the same time. Normally, this is handled in | |
+ * h/w by an arbiter but on some parts there is a bug that acknowledges Host | |
+ * accesses later than it should which could result in the register to have | |
+ * an incorrect value. Workaround this by checking the FWSM register which | |
+ * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set | |
+ * and try again a number of times. | |
+ **/ | |
+static inline s32 __ew32_prepare(struct e1000_hw *hw) | |
{ | |
- return readl(hw->hw_addr + reg); | |
+ s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; | |
+ | |
+ while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) | |
+ udelay(50); | |
+ | |
+ return i; | |
} | |
static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) | |
{ | |
+ if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
+ __ew32_prepare(hw); | |
+ | |
writel(val, hw->hw_addr + reg); | |
} | |
+#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) | |
+ | |
+#define e1e_flush() er32(STATUS) | |
+ | |
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ | |
+ (__ew32((a), (reg + ((offset) << 2)), (value))) | |
+ | |
+#define E1000_READ_REG_ARRAY(a, reg, offset) \ | |
+ (readl((a)->hw_addr + reg + ((offset) << 2))) | |
+ | |
#endif /* _E1000_H_ */ | |
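
The kernel-doc added above for __ew32_prepare() describes the arbiter workaround in words: before a MAC CSR write on affected parts, poll the FWSM register until the PCIM2PCI bit (bit 24) clears or a retry budget runs out, sleeping 50 microseconds between polls. A minimal stand-alone sketch of that retry loop follows; the register access is mocked, and the names read_fwsm(), csr_write_prepare() and PCIM2PCI_POLL_COUNT are illustrative stand-ins rather than driver symbols.

/* Stand-alone sketch of the FWSM polling workaround from __ew32_prepare().
 * Register access is mocked; only the retry logic mirrors the driver. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FWSM_PCIM2PCI       (1u << 24)  /* bit 24: ME is accessing MAC CSRs */
#define PCIM2PCI_POLL_COUNT 2000        /* illustrative retry budget */

static uint32_t fwsm_mock = FWSM_PCIM2PCI; /* pretend ME is busy at first */

static uint32_t read_fwsm(void)
{
        /* In the driver this would be er32(FWSM); here ME "finishes" after
         * a few polls so the example terminates. */
        static int polls;

        if (++polls > 3)
                fwsm_mock &= ~FWSM_PCIM2PCI;
        return fwsm_mock;
}

static int csr_write_prepare(void)
{
        int i = PCIM2PCI_POLL_COUNT;

        /* Wait while ME holds the arbiter, up to the retry budget
         * (the driver uses udelay(50) here). */
        while ((read_fwsm() & FWSM_PCIM2PCI) && --i)
                usleep(50);

        return i; /* 0 means the budget ran out, like __ew32_prepare() */
}

int main(void)
{
        if (csr_write_prepare())
                printf("safe to write the CSR now\n");
        else
                printf("timed out waiting for ME to release the bus\n");
        return 0;
}

As in __ew32_prepare(), the helper returns the remaining loop count, so a zero return signals that the Manageability Engine never released the bus within the budget.
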
diff -ru e1000e/ethtool.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/ethtool.c | |
--- e1000e/ethtool.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/ethtool.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -34,10 +34,12 @@ | |
#include <linux/pci.h> | |
#include <linux/slab.h> | |
#include <linux/delay.h> | |
+#include <linux/vmalloc.h> | |
+#include <linux/pm_runtime.h> | |
#include "e1000.h" | |
-enum {NETDEV_STATS, E1000_STATS}; | |
+enum { NETDEV_STATS, E1000_STATS }; | |
struct e1000_stats { | |
char stat_string[ETH_GSTRING_LEN]; | |
@@ -97,7 +99,6 @@ | |
E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), | |
E1000_STAT("tx_flow_control_xon", stats.xontxc), | |
E1000_STAT("tx_flow_control_xoff", stats.xofftxc), | |
- E1000_STAT("rx_long_byte_count", stats.gorc), | |
E1000_STAT("rx_csum_offload_good", hw_csum_good), | |
E1000_STAT("rx_csum_offload_errors", hw_csum_err), | |
E1000_STAT("rx_header_split", rx_hdr_split), | |
@@ -107,6 +108,9 @@ | |
E1000_STAT("dropped_smbus", stats.mgpdc), | |
E1000_STAT("rx_dma_failed", rx_dma_failed), | |
E1000_STAT("tx_dma_failed", tx_dma_failed), | |
+ E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), | |
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors), | |
+ E1000_STAT("corr_ecc_errors", corr_errors), | |
}; | |
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) | |
@@ -116,6 +120,7 @@ | |
"Interrupt test (offline)", "Loopback test (offline)", | |
"Link test (on/offline)" | |
}; | |
+ | |
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) | |
static int e1000_get_settings(struct net_device *netdev, | |
@@ -126,7 +131,6 @@ | |
u32 speed; | |
if (hw->phy.media_type == e1000_media_type_copper) { | |
- | |
ecmd->supported = (SUPPORTED_10baseT_Half | | |
SUPPORTED_10baseT_Full | | |
SUPPORTED_100baseT_Half | | |
@@ -169,7 +173,7 @@ | |
speed = adapter->link_speed; | |
ecmd->duplex = adapter->link_duplex - 1; | |
} | |
- } else { | |
+ } else if (!pm_runtime_suspended(netdev->dev.parent)) { | |
u32 status = er32(STATUS); | |
if (status & E1000_STATUS_LU) { | |
if (status & E1000_STATUS_SPEED_1000) | |
@@ -193,11 +197,15 @@ | |
/* MDI-X => 2; MDI =>1; Invalid =>0 */ | |
if ((hw->phy.media_type == e1000_media_type_copper) && | |
netif_carrier_ok(netdev)) | |
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : | |
- ETH_TP_MDI; | |
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI; | |
else | |
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; | |
+ if (hw->phy.mdix == AUTO_ALL_MODES) | |
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
+ else | |
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; | |
+ | |
return 0; | |
} | |
@@ -208,14 +216,14 @@ | |
mac->autoneg = 0; | |
/* Make sure dplx is at most 1 bit and lsb of speed is not set | |
- * for the switch() below to work */ | |
+ * for the switch() below to work | |
+ */ | |
if ((spd & 1) || (dplx & ~1)) | |
goto err_inval; | |
/* Fiber NICs only allow 1000 gbps Full duplex */ | |
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && | |
- spd != SPEED_1000 && | |
- dplx != DUPLEX_FULL) { | |
+ (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { | |
goto err_inval; | |
} | |
@@ -236,10 +244,14 @@ | |
mac->autoneg = 1; | |
adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; | |
break; | |
- case SPEED_1000 + DUPLEX_HALF: /* not supported */ | |
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */ | |
default: | |
goto err_inval; | |
} | |
+ | |
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ | |
+ adapter->hw.phy.mdix = AUTO_ALL_MODES; | |
+ | |
return 0; | |
err_inval: | |
@@ -252,15 +264,36 @@ | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_hw *hw = &adapter->hw; | |
+ int ret_val = 0; | |
- /* | |
- * When SoL/IDER sessions are active, autoneg/speed/duplex | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex | |
* cannot be changed | |
*/ | |
- if (e1000_check_reset_block(hw)) { | |
- e_err("Cannot change link characteristics when SoL/IDER is " | |
- "active.\n"); | |
- return -EINVAL; | |
+ if (hw->phy.ops.check_reset_block && | |
+ hw->phy.ops.check_reset_block(hw)) { | |
+ e_err("Cannot change link characteristics when SoL/IDER is active.\n"); | |
+ ret_val = -EINVAL; | |
+ goto out; | |
+ } | |
+ | |
+ /* MDI setting is only allowed when autoneg enabled because | |
+ * some hardware doesn't allow MDI setting when speed or | |
+ * duplex is forced. | |
+ */ | |
+ if (ecmd->eth_tp_mdix_ctrl) { | |
+ if (hw->phy.media_type != e1000_media_type_copper) { | |
+ ret_val = -EOPNOTSUPP; | |
+ goto out; | |
+ } | |
+ | |
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && | |
+ (ecmd->autoneg != AUTONEG_ENABLE)) { | |
+ e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); | |
+ ret_val = -EINVAL; | |
+ goto out; | |
+ } | |
} | |
while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | |
@@ -270,25 +303,34 @@ | |
hw->mac.autoneg = 1; | |
if (hw->phy.media_type == e1000_media_type_fiber) | |
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | | |
- ADVERTISED_FIBRE | | |
- ADVERTISED_Autoneg; | |
+ ADVERTISED_FIBRE | ADVERTISED_Autoneg; | |
else | |
hw->phy.autoneg_advertised = ecmd->advertising | | |
- ADVERTISED_TP | | |
- ADVERTISED_Autoneg; | |
+ ADVERTISED_TP | ADVERTISED_Autoneg; | |
ecmd->advertising = hw->phy.autoneg_advertised; | |
if (adapter->fc_autoneg) | |
hw->fc.requested_mode = e1000_fc_default; | |
} else { | |
u32 speed = ethtool_cmd_speed(ecmd); | |
+ /* calling this overrides forced MDI setting */ | |
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { | |
- clear_bit(__E1000_RESETTING, &adapter->state); | |
- return -EINVAL; | |
+ ret_val = -EINVAL; | |
+ goto out; | |
} | |
} | |
- /* reset the link */ | |
+ /* MDI-X => 2; MDI => 1; Auto => 3 */ | |
+ if (ecmd->eth_tp_mdix_ctrl) { | |
+ /* fix up the value for auto (3 => 0) as zero is mapped | |
+ * internally to auto | |
+ */ | |
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) | |
+ hw->phy.mdix = AUTO_ALL_MODES; | |
+ else | |
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; | |
+ } | |
+ /* reset the link */ | |
if (netif_running(adapter->netdev)) { | |
e1000e_down(adapter); | |
e1000e_up(adapter); | |
@@ -296,8 +338,10 @@ | |
e1000e_reset(adapter); | |
} | |
+out: | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
clear_bit(__E1000_RESETTING, &adapter->state); | |
- return 0; | |
+ return ret_val; | |
} | |
static void e1000_get_pauseparam(struct net_device *netdev, | |
@@ -307,7 +351,7 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
pause->autoneg = | |
- (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); | |
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); | |
if (hw->fc.current_mode == e1000_fc_rx_pause) { | |
pause->rx_pause = 1; | |
@@ -331,6 +375,8 @@ | |
while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | |
usleep_range(1000, 2000); | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
if (adapter->fc_autoneg == AUTONEG_ENABLE) { | |
hw->fc.requested_mode = e1000_fc_default; | |
if (netif_running(adapter->netdev)) { | |
@@ -363,6 +409,7 @@ | |
} | |
out: | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
clear_bit(__E1000_RESETTING, &adapter->state); | |
return retval; | |
} | |
@@ -379,9 +426,9 @@ | |
adapter->msg_enable = data; | |
} | |
-static int e1000_get_regs_len(struct net_device *netdev) | |
+static int e1000_get_regs_len(struct net_device __always_unused *netdev) | |
{ | |
-#define E1000_REGS_LEN 32 /* overestimate */ | |
+#define E1000_REGS_LEN 32 /* overestimate */ | |
return E1000_REGS_LEN * sizeof(u32); | |
} | |
@@ -393,31 +440,33 @@ | |
u32 *regs_buff = p; | |
u16 phy_data; | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
memset(p, 0, E1000_REGS_LEN * sizeof(u32)); | |
regs->version = (1 << 24) | (adapter->pdev->revision << 16) | | |
- adapter->pdev->device; | |
+ adapter->pdev->device; | |
- regs_buff[0] = er32(CTRL); | |
- regs_buff[1] = er32(STATUS); | |
+ regs_buff[0] = er32(CTRL); | |
+ regs_buff[1] = er32(STATUS); | |
- regs_buff[2] = er32(RCTL); | |
- regs_buff[3] = er32(RDLEN); | |
- regs_buff[4] = er32(RDH); | |
- regs_buff[5] = er32(RDT); | |
- regs_buff[6] = er32(RDTR); | |
- | |
- regs_buff[7] = er32(TCTL); | |
- regs_buff[8] = er32(TDLEN); | |
- regs_buff[9] = er32(TDH); | |
- regs_buff[10] = er32(TDT); | |
+ regs_buff[2] = er32(RCTL); | |
+ regs_buff[3] = er32(RDLEN(0)); | |
+ regs_buff[4] = er32(RDH(0)); | |
+ regs_buff[5] = er32(RDT(0)); | |
+ regs_buff[6] = er32(RDTR); | |
+ | |
+ regs_buff[7] = er32(TCTL); | |
+ regs_buff[8] = er32(TDLEN(0)); | |
+ regs_buff[9] = er32(TDH(0)); | |
+ regs_buff[10] = er32(TDT(0)); | |
regs_buff[11] = er32(TIDV); | |
- regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ | |
+ regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ | |
/* ethtool doesn't use anything past this point, so all this | |
- * code is likely legacy junk for apps that may or may not | |
- * exist */ | |
+ * code is likely legacy junk for apps that may or may not exist | |
+ */ | |
if (hw->phy.type == e1000_phy_m88) { | |
e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | |
regs_buff[13] = (u32)phy_data; /* cable length */ | |
@@ -433,10 +482,12 @@ | |
regs_buff[22] = adapter->phy_stats.receive_errors; | |
regs_buff[23] = regs_buff[13]; /* mdix mode */ | |
} | |
- regs_buff[21] = 0; /* was idle_errors */ | |
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); | |
- regs_buff[24] = (u32)phy_data; /* phy local receiver status */ | |
- regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ | |
+ regs_buff[21] = 0; /* was idle_errors */ | |
+ e1e_rphy(hw, MII_STAT1000, &phy_data); | |
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */ | |
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ | |
+ | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
} | |
static int e1000_get_eeprom_len(struct net_device *netdev) | |
@@ -464,11 +515,13 @@ | |
first_word = eeprom->offset >> 1; | |
last_word = (eeprom->offset + eeprom->len - 1) >> 1; | |
- eeprom_buff = kmalloc(sizeof(u16) * | |
- (last_word - first_word + 1), GFP_KERNEL); | |
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), | |
+ GFP_KERNEL); | |
if (!eeprom_buff) | |
return -ENOMEM; | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
if (hw->nvm.type == e1000_nvm_eeprom_spi) { | |
ret_val = e1000_read_nvm(hw, first_word, | |
last_word - first_word + 1, | |
@@ -476,12 +529,14 @@ | |
} else { | |
for (i = 0; i < last_word - first_word + 1; i++) { | |
ret_val = e1000_read_nvm(hw, first_word + i, 1, | |
- &eeprom_buff[i]); | |
+ &eeprom_buff[i]); | |
if (ret_val) | |
break; | |
} | |
} | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+ | |
if (ret_val) { | |
/* a read error occurred, throw away the result */ | |
memset(eeprom_buff, 0xff, sizeof(u16) * | |
@@ -514,7 +569,8 @@ | |
if (eeprom->len == 0) | |
return -EOPNOTSUPP; | |
- if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) | |
+ if (eeprom->magic != | |
+ (adapter->pdev->vendor | (adapter->pdev->device << 16))) | |
return -EFAULT; | |
if (adapter->flags & FLAG_READ_ONLY_NVM) | |
@@ -530,17 +586,19 @@ | |
ptr = (void *)eeprom_buff; | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
if (eeprom->offset & 1) { | |
/* need read/modify/write of first changed EEPROM word */ | |
/* only the second byte of the word is being modified */ | |
ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); | |
ptr++; | |
} | |
- if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) | |
+ if (((eeprom->offset + eeprom->len) & 1) && (!ret_val)) | |
/* need read/modify/write of last changed EEPROM word */ | |
/* only the first byte of the word is being modified */ | |
ret_val = e1000_read_nvm(hw, last_word, 1, | |
- &eeprom_buff[last_word - first_word]); | |
+ &eeprom_buff[last_word - first_word]); | |
if (ret_val) | |
goto out; | |
@@ -552,7 +610,7 @@ | |
memcpy(ptr, bytes, eeprom->len); | |
for (i = 0; i < last_word - first_word + 1; i++) | |
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); | |
+ cpu_to_le16s(&eeprom_buff[i]); | |
ret_val = e1000_write_nvm(hw, first_word, | |
last_word - first_word + 1, eeprom_buff); | |
@@ -560,8 +618,7 @@ | |
if (ret_val) | |
goto out; | |
- /* | |
- * Update the checksum over the first part of the EEPROM if needed | |
+ /* Update the checksum over the first part of the EEPROM if needed | |
* and flush shadow RAM for applicable controllers | |
*/ | |
if ((first_word <= NVM_CHECKSUM_REG) || | |
@@ -571,6 +628,7 @@ | |
ret_val = e1000e_update_nvm_checksum(hw); | |
out: | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
kfree(eeprom_buff); | |
return ret_val; | |
} | |
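
The set_eeprom hunk above frames the byte-oriented user request in 16-bit NVM words: when the requested offset is odd it first reads the partially overwritten first word, and when offset + len is odd it reads the last word, so unmodified bytes survive the write-back. Below is a small user-space sketch of that read/modify/write framing against an in-memory word array; nvm_write_bytes() and NVM_WORDS are made up for the example and are not driver names.

/* Sketch of the read/modify/write framing in e1000_set_eeprom(): the NVM is
 * word (16-bit) addressed, so a byte range that starts or ends mid-word has
 * to pull in the partial word(s) before the bytes are merged and written back. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVM_WORDS 16
static uint16_t nvm[NVM_WORDS];             /* fake word-addressed EEPROM */

static void nvm_write_bytes(unsigned int offset, const uint8_t *bytes,
                            unsigned int len)
{
        unsigned int first_word = offset >> 1;
        unsigned int last_word  = (offset + len - 1) >> 1;
        unsigned int nwords     = last_word - first_word + 1;
        uint8_t buf[2 * NVM_WORDS];

        /* Read the words we are about to touch. The driver only reads the
         * partial first/last word; reading the whole range keeps this short. */
        memcpy(buf, &nvm[first_word], nwords * 2);

        /* Merge the new bytes at their byte offset within the word range,
         * like the ptr++ adjustment in e1000_set_eeprom(). */
        memcpy(buf + (offset & 1), bytes, len);

        /* Write the whole word range back, as e1000_write_nvm() does. */
        memcpy(&nvm[first_word], buf, nwords * 2);
}

int main(void)
{
        const uint8_t patch[3] = { 0xAA, 0xBB, 0xCC };

        memset(nvm, 0xFF, sizeof(nvm));
        nvm_write_bytes(3, patch, sizeof(patch));   /* starts mid-word */

        printf("words 1..2: 0x%04X 0x%04X\n", nvm[1], nvm[2]);
        return 0;
}
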
@@ -580,20 +638,18 @@ | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
- strlcpy(drvinfo->driver, e1000e_driver_name, | |
- sizeof(drvinfo->driver)); | |
+ strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); | |
strlcpy(drvinfo->version, e1000e_driver_version, | |
sizeof(drvinfo->version)); | |
- /* | |
- * EEPROM image version # is reported as firmware version # for | |
+ /* EEPROM image version # is reported as firmware version # for | |
* PCI-E controllers | |
*/ | |
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), | |
- "%d.%d-%d", | |
- (adapter->eeprom_vers & 0xF000) >> 12, | |
- (adapter->eeprom_vers & 0x0FF0) >> 4, | |
- (adapter->eeprom_vers & 0x000F)); | |
+ "%d.%d-%d", | |
+ (adapter->eeprom_vers & 0xF000) >> 12, | |
+ (adapter->eeprom_vers & 0x0FF0) >> 4, | |
+ (adapter->eeprom_vers & 0x000F)); | |
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), | |
sizeof(drvinfo->bus_info)); | |
@@ -605,94 +661,114 @@ | |
struct ethtool_ringparam *ring) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
ring->rx_max_pending = E1000_MAX_RXD; | |
ring->tx_max_pending = E1000_MAX_TXD; | |
- ring->rx_pending = rx_ring->count; | |
- ring->tx_pending = tx_ring->count; | |
+ ring->rx_pending = adapter->rx_ring_count; | |
+ ring->tx_pending = adapter->tx_ring_count; | |
} | |
static int e1000_set_ringparam(struct net_device *netdev, | |
struct ethtool_ringparam *ring) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
- struct e1000_ring *tx_ring, *tx_old; | |
- struct e1000_ring *rx_ring, *rx_old; | |
- int err; | |
+ struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; | |
+ int err = 0, size = sizeof(struct e1000_ring); | |
+ bool set_tx = false, set_rx = false; | |
+ u16 new_rx_count, new_tx_count; | |
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | |
return -EINVAL; | |
+ new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD, | |
+ E1000_MAX_RXD); | |
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); | |
+ | |
+ new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD, | |
+ E1000_MAX_TXD); | |
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); | |
+ | |
+ if ((new_tx_count == adapter->tx_ring_count) && | |
+ (new_rx_count == adapter->rx_ring_count)) | |
+ /* nothing to do */ | |
+ return 0; | |
+ | |
while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | |
usleep_range(1000, 2000); | |
- if (netif_running(adapter->netdev)) | |
- e1000e_down(adapter); | |
+ if (!netif_running(adapter->netdev)) { | |
+ /* Set counts now and allocate resources during open() */ | |
+ adapter->tx_ring->count = new_tx_count; | |
+ adapter->rx_ring->count = new_rx_count; | |
+ adapter->tx_ring_count = new_tx_count; | |
+ adapter->rx_ring_count = new_rx_count; | |
+ goto clear_reset; | |
+ } | |
- tx_old = adapter->tx_ring; | |
- rx_old = adapter->rx_ring; | |
+ set_tx = (new_tx_count != adapter->tx_ring_count); | |
+ set_rx = (new_rx_count != adapter->rx_ring_count); | |
- err = -ENOMEM; | |
- tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); | |
- if (!tx_ring) | |
- goto err_alloc_tx; | |
- | |
- rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); | |
- if (!rx_ring) | |
- goto err_alloc_rx; | |
- | |
- adapter->tx_ring = tx_ring; | |
- adapter->rx_ring = rx_ring; | |
- | |
- rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); | |
- rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); | |
- rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); | |
- | |
- tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); | |
- tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); | |
- tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); | |
+ /* Allocate temporary storage for ring updates */ | |
+ if (set_tx) { | |
+ temp_tx = vmalloc(size); | |
+ if (!temp_tx) { | |
+ err = -ENOMEM; | |
+ goto free_temp; | |
+ } | |
+ } | |
+ if (set_rx) { | |
+ temp_rx = vmalloc(size); | |
+ if (!temp_rx) { | |
+ err = -ENOMEM; | |
+ goto free_temp; | |
+ } | |
+ } | |
- if (netif_running(adapter->netdev)) { | |
- /* Try to get new resources before deleting old */ | |
- err = e1000e_setup_rx_resources(adapter); | |
- if (err) | |
- goto err_setup_rx; | |
- err = e1000e_setup_tx_resources(adapter); | |
- if (err) | |
- goto err_setup_tx; | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
- /* | |
- * restore the old in order to free it, | |
- * then add in the new | |
- */ | |
- adapter->rx_ring = rx_old; | |
- adapter->tx_ring = tx_old; | |
- e1000e_free_rx_resources(adapter); | |
- e1000e_free_tx_resources(adapter); | |
- kfree(tx_old); | |
- kfree(rx_old); | |
- adapter->rx_ring = rx_ring; | |
- adapter->tx_ring = tx_ring; | |
- err = e1000e_up(adapter); | |
+ e1000e_down(adapter); | |
+ | |
+ /* We can't just free everything and then setup again, because the | |
+ * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring | |
+ * structs. First, attempt to allocate new resources... | |
+ */ | |
+ if (set_tx) { | |
+ memcpy(temp_tx, adapter->tx_ring, size); | |
+ temp_tx->count = new_tx_count; | |
+ err = e1000e_setup_tx_resources(temp_tx); | |
if (err) | |
goto err_setup; | |
} | |
+ if (set_rx) { | |
+ memcpy(temp_rx, adapter->rx_ring, size); | |
+ temp_rx->count = new_rx_count; | |
+ err = e1000e_setup_rx_resources(temp_rx); | |
+ if (err) | |
+ goto err_setup_rx; | |
+ } | |
+ | |
+ /* ...then free the old resources and copy back any new ring data */ | |
+ if (set_tx) { | |
+ e1000e_free_tx_resources(adapter->tx_ring); | |
+ memcpy(adapter->tx_ring, temp_tx, size); | |
+ adapter->tx_ring_count = new_tx_count; | |
+ } | |
+ if (set_rx) { | |
+ e1000e_free_rx_resources(adapter->rx_ring); | |
+ memcpy(adapter->rx_ring, temp_rx, size); | |
+ adapter->rx_ring_count = new_rx_count; | |
+ } | |
- clear_bit(__E1000_RESETTING, &adapter->state); | |
- return 0; | |
-err_setup_tx: | |
- e1000e_free_rx_resources(adapter); | |
err_setup_rx: | |
- adapter->rx_ring = rx_old; | |
- adapter->tx_ring = tx_old; | |
- kfree(rx_ring); | |
-err_alloc_rx: | |
- kfree(tx_ring); | |
-err_alloc_tx: | |
- e1000e_up(adapter); | |
+ if (err && set_tx) | |
+ e1000e_free_tx_resources(temp_tx); | |
err_setup: | |
+ e1000e_up(adapter); | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+free_temp: | |
+ vfree(temp_tx); | |
+ vfree(temp_rx); | |
+clear_reset: | |
clear_bit(__E1000_RESETTING, &adapter->state); | |
return err; | |
} | |
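
The rewritten e1000_set_ringparam() above first clamps the requested descriptor counts to the supported range and rounds them up to the required multiple, and only then allocates the new rings before freeing the old ones (the MSI-X handlers keep pointers into the ring structs, so they cannot simply be freed and rebuilt). A small sketch of just the clamp-and-align step follows; MIN_RXD, MAX_RXD and RXD_ALIGN are placeholder limits standing in for E1000_MIN_RXD, E1000_MAX_RXD and REQ_RX_DESCRIPTOR_MULTIPLE, whose real values are not shown in this diff.

/* Sketch of the descriptor-count sanitizing done in e1000_set_ringparam().
 * The limits below are placeholders, not the driver's real values. */
#include <stdio.h>

#define MIN_RXD   80      /* illustrative */
#define MAX_RXD   4096    /* illustrative */
#define RXD_ALIGN 8       /* stand-in for REQ_RX_DESCRIPTOR_MULTIPLE */

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int sanitize_ring_count(unsigned int requested)
{
        unsigned int n = clamp_uint(requested, MIN_RXD, MAX_RXD);

        /* The kernel's ALIGN() rounds up to the next multiple. */
        return (n + RXD_ALIGN - 1) & ~(unsigned int)(RXD_ALIGN - 1);
}

int main(void)
{
        printf("%u -> %u\n", 100u, sanitize_ring_count(100));    /* 104 */
        printf("%u -> %u\n", 5000u, sanitize_ring_count(5000));  /* 4096 */
        return 0;
}
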
@@ -702,14 +778,15 @@ | |
{ | |
u32 pat, val; | |
static const u32 test[] = { | |
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; | |
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF | |
+ }; | |
for (pat = 0; pat < ARRAY_SIZE(test); pat++) { | |
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, | |
(test[pat] & write)); | |
val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); | |
if (val != (test[pat] & write & mask)) { | |
- e_err("pattern test reg %04X failed: got 0x%08X " | |
- "expected 0x%08X\n", reg + offset, val, | |
+ e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", | |
+ reg + (offset << 2), val, | |
(test[pat] & write & mask)); | |
*data = reg; | |
return 1; | |
@@ -725,13 +802,14 @@ | |
__ew32(&adapter->hw, reg, write & mask); | |
val = __er32(&adapter->hw, reg); | |
if ((write & mask) != (val & mask)) { | |
- e_err("set/check reg %04X test failed: got 0x%08X " | |
- "expected 0x%08X\n", reg, (val & mask), (write & mask)); | |
+ e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", | |
+ reg, (val & mask), (write & mask)); | |
*data = reg; | |
return 1; | |
} | |
return 0; | |
} | |
+ | |
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ | |
do { \ | |
if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ | |
@@ -756,19 +834,19 @@ | |
u32 i; | |
u32 toggle; | |
u32 mask; | |
+ u32 wlock_mac = 0; | |
- /* | |
- * The status register is Read Only, so a write should fail. | |
- * Some bits that get toggled are ignored. | |
+ /* The status register is Read Only, so a write should fail. | |
+ * Some bits that get toggled are ignored. There are several bits | |
+ * on newer hardware that are r/w. | |
*/ | |
switch (mac->type) { | |
- /* there are several bits on newer hardware that are r/w */ | |
case e1000_82571: | |
case e1000_82572: | |
case e1000_80003es2lan: | |
toggle = 0x7FFFF3FF; | |
break; | |
- default: | |
+ default: | |
toggle = 0x7FFFF033; | |
break; | |
} | |
@@ -778,8 +856,8 @@ | |
ew32(STATUS, toggle); | |
after = er32(STATUS) & toggle; | |
if (value != after) { | |
- e_err("failed STATUS register test got: 0x%08X expected: " | |
- "0x%08X\n", after, value); | |
+ e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", | |
+ after, value); | |
*data = 1; | |
return 1; | |
} | |
@@ -794,15 +872,15 @@ | |
} | |
REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); | |
- REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | |
- REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); | |
- REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); | |
- REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); | |
+ REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); | |
+ REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); | |
+ REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); | |
+ REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); | |
REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); | |
REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); | |
REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); | |
- REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | |
- REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); | |
+ REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); | |
+ REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); | |
REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); | |
@@ -811,29 +889,57 @@ | |
REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); | |
REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); | |
- REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | |
+ REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); | |
if (!(adapter->flags & FLAG_IS_ICH)) | |
REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); | |
- REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | |
+ REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); | |
REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); | |
mask = 0x8003FFFF; | |
switch (mac->type) { | |
case e1000_ich10lan: | |
case e1000_pchlan: | |
case e1000_pch2lan: | |
+ case e1000_pch_lpt: | |
mask |= (1 << 18); | |
break; | |
default: | |
break; | |
} | |
- for (i = 0; i < mac->rar_entry_count; i++) | |
- REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), | |
- mask, 0xFFFFFFFF); | |
+ | |
+ if (mac->type == e1000_pch_lpt) | |
+ wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> | |
+ E1000_FWSM_WLOCK_MAC_SHIFT; | |
+ | |
+ for (i = 0; i < mac->rar_entry_count; i++) { | |
+ if (mac->type == e1000_pch_lpt) { | |
+ /* Cannot test write-protected SHRAL[n] registers */ | |
+ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) | |
+ continue; | |
+ | |
+ /* SHRAH[9] different than the others */ | |
+ if (i == 10) | |
+ mask |= (1 << 30); | |
+ else | |
+ mask &= ~(1 << 30); | |
+ } | |
+ if (mac->type == e1000_pch2lan) { | |
+ /* SHRAH[0,1,2] different than previous */ | |
+ if (i == 7) | |
+ mask &= 0xFFF4FFFF; | |
+ /* SHRAH[3] different than SHRAH[0,1,2] */ | |
+ if (i == 10) | |
+ mask |= (1 << 30); | |
+ } | |
+ | |
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, | |
+ 0xFFFFFFFF); | |
+ } | |
for (i = 0; i < mac->mta_reg_count; i++) | |
REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); | |
*data = 0; | |
+ | |
return 0; | |
} | |
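
reg_pattern_test() above drives four fixed patterns through a register, reads each one back, and fails if the read-back differs from the pattern masked by both the write mask and the read mask; REG_PATTERN_TEST_ARRAY repeats the same check across a register array using an (offset << 2) stride. The stand-alone model below performs the same comparison against an ordinary variable; because plain memory has no read-only bits, the read-back is masked explicitly to stand in for the hardware behaviour.

/* Model of reg_pattern_test(): write patterns, read back, compare under masks.
 * The "register" is plain memory instead of an MMIO BAR, so the read-back is
 * masked here to imitate the hardware's read-only bits. */
#include <stdio.h>

static volatile unsigned int fake_reg;

static int reg_pattern_test(volatile unsigned int *reg, unsigned int mask,
                            unsigned int write)
{
        static const unsigned int test[] = {
                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
        };
        unsigned int pat;

        for (pat = 0; pat < sizeof(test) / sizeof(test[0]); pat++) {
                *reg = test[pat] & write;
                if ((*reg & mask) != (test[pat] & write & mask)) {
                        fprintf(stderr,
                                "pattern test failed: got 0x%08X expected 0x%08X\n",
                                *reg & mask, test[pat] & write & mask);
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        /* RDTR-style register: only the low 16 bits are significant. */
        int failed = reg_pattern_test(&fake_reg, 0x0000FFFF, 0xFFFFFFFF);

        printf("register pattern test %s\n", failed ? "failed" : "passed");
        return 0;
}
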
@@ -854,15 +960,15 @@ | |
} | |
/* If Checksum is not Correct return error else test passed */ | |
- if ((checksum != (u16) NVM_SUM) && !(*data)) | |
+ if ((checksum != (u16)NVM_SUM) && !(*data)) | |
*data = 2; | |
return *data; | |
} | |
-static irqreturn_t e1000_test_intr(int irq, void *data) | |
+static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) | |
{ | |
- struct net_device *netdev = (struct net_device *) data; | |
+ struct net_device *netdev = (struct net_device *)data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_hw *hw = &adapter->hw; | |
@@ -895,8 +1001,8 @@ | |
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | |
netdev)) { | |
shared_int = 0; | |
- } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, | |
- netdev->name, netdev)) { | |
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, | |
+ netdev)) { | |
*data = 1; | |
ret_val = -1; | |
goto out; | |
@@ -928,8 +1034,7 @@ | |
} | |
if (!shared_int) { | |
- /* | |
- * Disable the interrupt to be reported in | |
+ /* Disable the interrupt to be reported in | |
* the cause register and then force the same | |
* interrupt and see if one gets posted. If | |
* an interrupt was posted to the bus, the | |
@@ -947,8 +1052,7 @@ | |
} | |
} | |
- /* | |
- * Enable the interrupt to be reported in | |
+ /* Enable the interrupt to be reported in | |
* the cause register and then force the same | |
* interrupt and see if one gets posted. If | |
* an interrupt was not posted to the bus, the | |
@@ -966,8 +1070,7 @@ | |
} | |
if (!shared_int) { | |
- /* | |
- * Disable the other interrupts to be reported in | |
+ /* Disable the other interrupts to be reported in | |
* the cause register and then force the other | |
* interrupts and see if any get posted. If | |
* an interrupt was posted to the bus, the | |
@@ -1009,28 +1112,33 @@ | |
struct e1000_ring *tx_ring = &adapter->test_tx_ring; | |
struct e1000_ring *rx_ring = &adapter->test_rx_ring; | |
struct pci_dev *pdev = adapter->pdev; | |
+ struct e1000_buffer *buffer_info; | |
int i; | |
if (tx_ring->desc && tx_ring->buffer_info) { | |
for (i = 0; i < tx_ring->count; i++) { | |
- if (tx_ring->buffer_info[i].dma) | |
+ buffer_info = &tx_ring->buffer_info[i]; | |
+ | |
+ if (buffer_info->dma) | |
dma_unmap_single(&pdev->dev, | |
- tx_ring->buffer_info[i].dma, | |
- tx_ring->buffer_info[i].length, | |
- DMA_TO_DEVICE); | |
- if (tx_ring->buffer_info[i].skb) | |
- dev_kfree_skb(tx_ring->buffer_info[i].skb); | |
+ buffer_info->dma, | |
+ buffer_info->length, | |
+ DMA_TO_DEVICE); | |
+ if (buffer_info->skb) | |
+ dev_kfree_skb(buffer_info->skb); | |
} | |
} | |
if (rx_ring->desc && rx_ring->buffer_info) { | |
for (i = 0; i < rx_ring->count; i++) { | |
- if (rx_ring->buffer_info[i].dma) | |
+ buffer_info = &rx_ring->buffer_info[i]; | |
+ | |
+ if (buffer_info->dma) | |
dma_unmap_single(&pdev->dev, | |
- rx_ring->buffer_info[i].dma, | |
- 2048, DMA_FROM_DEVICE); | |
- if (rx_ring->buffer_info[i].skb) | |
- dev_kfree_skb(rx_ring->buffer_info[i].skb); | |
+ buffer_info->dma, | |
+ 2048, DMA_FROM_DEVICE); | |
+ if (buffer_info->skb) | |
+ dev_kfree_skb(buffer_info->skb); | |
} | |
} | |
@@ -1067,9 +1175,8 @@ | |
tx_ring->count = E1000_DEFAULT_TXD; | |
tx_ring->buffer_info = kcalloc(tx_ring->count, | |
- sizeof(struct e1000_buffer), | |
- GFP_KERNEL); | |
- if (!(tx_ring->buffer_info)) { | |
+ sizeof(struct e1000_buffer), GFP_KERNEL); | |
+ if (!tx_ring->buffer_info) { | |
ret_val = 1; | |
goto err_nomem; | |
} | |
@@ -1085,11 +1192,11 @@ | |
tx_ring->next_to_use = 0; | |
tx_ring->next_to_clean = 0; | |
- ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); | |
- ew32(TDBAH, ((u64) tx_ring->dma >> 32)); | |
- ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); | |
- ew32(TDH, 0); | |
- ew32(TDT, 0); | |
+ ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); | |
+ ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); | |
+ ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); | |
+ ew32(TDH(0), 0); | |
+ ew32(TDT(0), 0); | |
ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | | |
E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | |
E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); | |
@@ -1108,8 +1215,8 @@ | |
tx_ring->buffer_info[i].skb = skb; | |
tx_ring->buffer_info[i].length = skb->len; | |
tx_ring->buffer_info[i].dma = | |
- dma_map_single(&pdev->dev, skb->data, skb->len, | |
- DMA_TO_DEVICE); | |
+ dma_map_single(&pdev->dev, skb->data, skb->len, | |
+ DMA_TO_DEVICE); | |
if (dma_mapping_error(&pdev->dev, | |
tx_ring->buffer_info[i].dma)) { | |
ret_val = 4; | |
@@ -1129,9 +1236,8 @@ | |
rx_ring->count = E1000_DEFAULT_RXD; | |
rx_ring->buffer_info = kcalloc(rx_ring->count, | |
- sizeof(struct e1000_buffer), | |
- GFP_KERNEL); | |
- if (!(rx_ring->buffer_info)) { | |
+ sizeof(struct e1000_buffer), GFP_KERNEL); | |
+ if (!rx_ring->buffer_info) { | |
ret_val = 5; | |
goto err_nomem; | |
} | |
@@ -1149,16 +1255,16 @@ | |
rctl = er32(RCTL); | |
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) | |
ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
- ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); | |
- ew32(RDBAH, ((u64) rx_ring->dma >> 32)); | |
- ew32(RDLEN, rx_ring->size); | |
- ew32(RDH, 0); | |
- ew32(RDT, 0); | |
+ ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); | |
+ ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); | |
+ ew32(RDLEN(0), rx_ring->size); | |
+ ew32(RDH(0), 0); | |
+ ew32(RDT(0), 0); | |
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | | |
- E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | | |
- E1000_RCTL_SBP | E1000_RCTL_SECRC | | |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | |
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | | |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC | | |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | |
ew32(RCTL, rctl); | |
for (i = 0; i < rx_ring->count; i++) { | |
@@ -1173,8 +1279,8 @@ | |
skb_reserve(skb, NET_IP_ALIGN); | |
rx_ring->buffer_info[i].skb = skb; | |
rx_ring->buffer_info[i].dma = | |
- dma_map_single(&pdev->dev, skb->data, 2048, | |
- DMA_FROM_DEVICE); | |
+ dma_map_single(&pdev->dev, skb->data, 2048, | |
+ DMA_FROM_DEVICE); | |
if (dma_mapping_error(&pdev->dev, | |
rx_ring->buffer_info[i].dma)) { | |
ret_val = 8; | |
@@ -1213,7 +1319,7 @@ | |
if (hw->phy.type == e1000_phy_ife) { | |
/* force 100, set loopback */ | |
- e1e_wphy(hw, PHY_CONTROL, 0x6100); | |
+ e1e_wphy(hw, MII_BMCR, 0x6100); | |
/* Now set up the MAC to the same speed/duplex as the PHY. */ | |
ctrl_reg = er32(CTRL); | |
@@ -1225,7 +1331,7 @@ | |
ew32(CTRL, ctrl_reg); | |
e1e_flush(); | |
- udelay(500); | |
+ usleep_range(500, 1000); | |
return 0; | |
} | |
@@ -1236,9 +1342,9 @@ | |
/* Auto-MDI/MDIX Off */ | |
e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | |
/* reset to update Auto-MDI/MDIX */ | |
- e1e_wphy(hw, PHY_CONTROL, 0x9140); | |
+ e1e_wphy(hw, MII_BMCR, 0x9140); | |
/* autoneg off */ | |
- e1e_wphy(hw, PHY_CONTROL, 0x8140); | |
+ e1e_wphy(hw, MII_BMCR, 0x8140); | |
break; | |
case e1000_phy_gg82563: | |
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); | |
@@ -1250,8 +1356,8 @@ | |
phy_reg |= 0x006; | |
e1e_wphy(hw, PHY_REG(2, 21), phy_reg); | |
/* Assert SW reset for above settings to take effect */ | |
- e1000e_commit_phy(hw); | |
- mdelay(1); | |
+ hw->phy.ops.commit(hw); | |
+ usleep_range(1000, 2000); | |
/* Force Full Duplex */ | |
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); | |
e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); | |
@@ -1284,7 +1390,6 @@ | |
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); | |
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); | |
/* Enable loopback on the PHY */ | |
-#define I82577_PHY_LBK_CTRL 19 | |
e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); | |
break; | |
default: | |
@@ -1292,8 +1397,8 @@ | |
} | |
/* force 1000, set loopback */ | |
- e1e_wphy(hw, PHY_CONTROL, 0x4140); | |
- mdelay(250); | |
+ e1e_wphy(hw, MII_BMCR, 0x4140); | |
+ msleep(250); | |
/* Now set up the MAC to the same speed/duplex as the PHY. */ | |
ctrl_reg = er32(CTRL); | |
@@ -1308,10 +1413,9 @@ | |
if (hw->phy.media_type == e1000_media_type_copper && | |
hw->phy.type == e1000_phy_m88) { | |
- ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | |
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | |
} else { | |
- /* | |
- * Set the ILOS bit on the fiber Nic if half duplex link is | |
+ /* Set the ILOS bit on the fiber Nic if half duplex link is | |
* detected. | |
*/ | |
if ((er32(STATUS) & E1000_STATUS_FD) == 0) | |
@@ -1320,14 +1424,13 @@ | |
ew32(CTRL, ctrl_reg); | |
- /* | |
- * Disable the receiver on the PHY so when a cable is plugged in, the | |
+ /* Disable the receiver on the PHY so when a cable is plugged in, the | |
* PHY does not begin to autoneg when a cable is reconnected to the NIC. | |
*/ | |
if (hw->phy.type == e1000_phy_m88) | |
e1000_phy_disable_receiver(adapter); | |
- udelay(500); | |
+ usleep_range(500, 1000); | |
return 0; | |
} | |
@@ -1336,12 +1439,11 @@ | |
{ | |
struct e1000_hw *hw = &adapter->hw; | |
u32 ctrl = er32(CTRL); | |
- int link = 0; | |
+ int link; | |
/* special requirements for 82571/82572 fiber adapters */ | |
- /* | |
- * jump through hoops to make sure link is up because serdes | |
+ /* jump through hoops to make sure link is up because serdes | |
* link is hardwired up | |
*/ | |
ctrl |= E1000_CTRL_SLU; | |
@@ -1361,12 +1463,10 @@ | |
ew32(CTRL, ctrl); | |
} | |
- /* | |
- * special write to serdes control register to enable SerDes analog | |
+ /* special write to serdes control register to enable SerDes analog | |
* loopback | |
*/ | |
-#define E1000_SERDES_LB_ON 0x410 | |
- ew32(SCTL, E1000_SERDES_LB_ON); | |
+ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); | |
e1e_flush(); | |
usleep_range(10000, 20000); | |
@@ -1380,8 +1480,7 @@ | |
u32 ctrlext = er32(CTRL_EXT); | |
u32 ctrl = er32(CTRL); | |
- /* | |
- * save CTRL_EXT to restore later, reuse an empty variable (unused | |
+ /* save CTRL_EXT to restore later, reuse an empty variable (unused | |
* on mac_type 80003es2lan) | |
*/ | |
adapter->tx_fifo_head = ctrlext; | |
@@ -1461,8 +1560,7 @@ | |
case e1000_82572: | |
if (hw->phy.media_type == e1000_media_type_fiber || | |
hw->phy.media_type == e1000_media_type_internal_serdes) { | |
-#define E1000_SERDES_LB_OFF 0x400 | |
- ew32(SCTL, E1000_SERDES_LB_OFF); | |
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); | |
e1e_flush(); | |
usleep_range(10000, 20000); | |
break; | |
@@ -1472,11 +1570,12 @@ | |
hw->mac.autoneg = 1; | |
if (hw->phy.type == e1000_phy_gg82563) | |
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); | |
- e1e_rphy(hw, PHY_CONTROL, &phy_reg); | |
- if (phy_reg & MII_CR_LOOPBACK) { | |
- phy_reg &= ~MII_CR_LOOPBACK; | |
- e1e_wphy(hw, PHY_CONTROL, phy_reg); | |
- e1000e_commit_phy(hw); | |
+ e1e_rphy(hw, MII_BMCR, &phy_reg); | |
+ if (phy_reg & BMCR_LOOPBACK) { | |
+ phy_reg &= ~BMCR_LOOPBACK; | |
+ e1e_wphy(hw, MII_BMCR, phy_reg); | |
+ if (hw->phy.ops.commit) | |
+ hw->phy.ops.commit(hw); | |
} | |
break; | |
} | |
@@ -1498,7 +1597,7 @@ | |
frame_size &= ~1; | |
if (*(skb->data + 3) == 0xFF) | |
if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && | |
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) | |
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) | |
return 0; | |
return 13; | |
} | |
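
e1000_check_lbtest_frame() above accepts a received loopback frame when byte 3 is 0xFF and the bytes at frame_size/2 + 10 and frame_size/2 + 12 carry the 0xBE/0xAF markers, returning the driver's mis-compare code 13 otherwise. A stand-alone model of that check follows; the create helper only plants the bytes the checker inspects and is not a faithful copy of e1000_create_lbtest_frame(), which is not shown in this diff.

/* Model of the loopback-test frame check from e1000_check_lbtest_frame().
 * The "create" helper only plants the bytes the checker looks at. */
#include <stdio.h>
#include <string.h>

static void create_lbtest_frame(unsigned char *buf, unsigned int frame_size)
{
        frame_size &= ~1u;
        memset(buf, 0x00, frame_size);
        buf[3] = 0xFF;                       /* marker byte the check expects */
        buf[frame_size / 2 + 10] = 0xBE;     /* payload markers */
        buf[frame_size / 2 + 12] = 0xAF;
}

static int check_lbtest_frame(const unsigned char *buf, unsigned int frame_size)
{
        frame_size &= ~1u;
        if (buf[3] == 0xFF &&
            buf[frame_size / 2 + 10] == 0xBE &&
            buf[frame_size / 2 + 12] == 0xAF)
                return 0;
        return 13; /* same "mis-compare" code the driver uses */
}

int main(void)
{
        unsigned char frame[1024];

        create_lbtest_frame(frame, sizeof(frame));
        printf("loopback frame check: %d\n",
               check_lbtest_frame(frame, sizeof(frame)));
        return 0;
}
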
@@ -1509,16 +1608,16 @@ | |
struct e1000_ring *rx_ring = &adapter->test_rx_ring; | |
struct pci_dev *pdev = adapter->pdev; | |
struct e1000_hw *hw = &adapter->hw; | |
+ struct e1000_buffer *buffer_info; | |
int i, j, k, l; | |
int lc; | |
int good_cnt; | |
int ret_val = 0; | |
unsigned long time; | |
- ew32(RDT, rx_ring->count - 1); | |
+ ew32(RDT(0), rx_ring->count - 1); | |
- /* | |
- * Calculate the loop count based on the largest descriptor ring | |
+ /* Calculate the loop count based on the largest descriptor ring | |
* The idea is to wrap the largest ring a number of times using 64 | |
* send/receive pairs during each loop | |
*/ | |
@@ -1530,60 +1629,65 @@ | |
k = 0; | |
l = 0; | |
- for (j = 0; j <= lc; j++) { /* loop count loop */ | |
- for (i = 0; i < 64; i++) { /* send the packets */ | |
- e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, | |
- 1024); | |
+ /* loop count loop */ | |
+ for (j = 0; j <= lc; j++) { | |
+ /* send the packets */ | |
+ for (i = 0; i < 64; i++) { | |
+ buffer_info = &tx_ring->buffer_info[k]; | |
+ | |
+ e1000_create_lbtest_frame(buffer_info->skb, 1024); | |
dma_sync_single_for_device(&pdev->dev, | |
- tx_ring->buffer_info[k].dma, | |
- tx_ring->buffer_info[k].length, | |
- DMA_TO_DEVICE); | |
+ buffer_info->dma, | |
+ buffer_info->length, | |
+ DMA_TO_DEVICE); | |
k++; | |
if (k == tx_ring->count) | |
k = 0; | |
} | |
- ew32(TDT, k); | |
+ ew32(TDT(0), k); | |
e1e_flush(); | |
msleep(200); | |
- time = jiffies; /* set the start time for the receive */ | |
+ time = jiffies; /* set the start time for the receive */ | |
good_cnt = 0; | |
- do { /* receive the sent packets */ | |
+ /* receive the sent packets */ | |
+ do { | |
+ buffer_info = &rx_ring->buffer_info[l]; | |
+ | |
dma_sync_single_for_cpu(&pdev->dev, | |
- rx_ring->buffer_info[l].dma, 2048, | |
- DMA_FROM_DEVICE); | |
+ buffer_info->dma, 2048, | |
+ DMA_FROM_DEVICE); | |
- ret_val = e1000_check_lbtest_frame( | |
- rx_ring->buffer_info[l].skb, 1024); | |
+ ret_val = e1000_check_lbtest_frame(buffer_info->skb, | |
+ 1024); | |
if (!ret_val) | |
good_cnt++; | |
l++; | |
if (l == rx_ring->count) | |
l = 0; | |
- /* | |
- * time + 20 msecs (200 msecs on 2.4) is more than | |
+ /* time + 20 msecs (200 msecs on 2.4) is more than | |
* enough time to complete the receives, if it's | |
* exceeded, break and error off | |
*/ | |
} while ((good_cnt < 64) && !time_after(jiffies, time + 20)); | |
if (good_cnt != 64) { | |
- ret_val = 13; /* ret_val is the same as mis-compare */ | |
+ ret_val = 13; /* ret_val is the same as mis-compare */ | |
break; | |
} | |
- if (jiffies >= (time + 20)) { | |
- ret_val = 14; /* error code for time out error */ | |
+ if (time_after(jiffies, time + 20)) { | |
+ ret_val = 14; /* error code for time out error */ | |
break; | |
} | |
- } /* end loop count loop */ | |
+ } | |
return ret_val; | |
} | |
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | |
{ | |
- /* | |
- * PHY loopback cannot be performed if SoL/IDER | |
- * sessions are active | |
- */ | |
- if (e1000_check_reset_block(&adapter->hw)) { | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ | |
+ /* PHY loopback cannot be performed if SoL/IDER sessions are active */ | |
+ if (hw->phy.ops.check_reset_block && | |
+ hw->phy.ops.check_reset_block(hw)) { | |
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); | |
*data = 0; | |
goto out; | |
@@ -1615,8 +1719,7 @@ | |
int i = 0; | |
hw->mac.serdes_has_link = false; | |
- /* | |
- * On some blade server designs, link establishment | |
+ /* On some blade server designs, link establishment | |
* could take as long as 2-3 minutes | |
*/ | |
do { | |
@@ -1630,11 +1733,10 @@ | |
} else { | |
hw->mac.ops.check_for_link(hw); | |
if (hw->mac.autoneg) | |
- /* | |
- * On some Phy/switch combinations, link establishment | |
+ /* On some Phy/switch combinations, link establishment | |
* can take a few seconds more than expected. | |
*/ | |
- msleep(5000); | |
+ msleep_interruptible(5000); | |
if (!(er32(STATUS) & E1000_STATUS_LU)) | |
*data = 1; | |
@@ -1642,7 +1744,8 @@ | |
return *data; | |
} | |
-static int e1000e_get_sset_count(struct net_device *netdev, int sset) | |
+static int e1000e_get_sset_count(struct net_device __always_unused *netdev, | |
+ int sset) | |
{ | |
switch (sset) { | |
case ETH_SS_TEST: | |
@@ -1663,6 +1766,8 @@ | |
u8 autoneg; | |
bool if_running = netif_running(netdev); | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
set_bit(__E1000_TESTING, &adapter->state); | |
if (!if_running) { | |
@@ -1748,6 +1853,8 @@ | |
} | |
msleep_interruptible(4 * 1000); | |
+ | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
} | |
static void e1000_get_wol(struct net_device *netdev, | |
@@ -1770,8 +1877,7 @@ | |
wol->supported &= ~WAKE_UCAST; | |
if (adapter->wol & E1000_WUFC_EX) | |
- e_err("Interface does not support directed (unicast) " | |
- "frame wake-up packets\n"); | |
+ e_err("Interface does not support directed (unicast) frame wake-up packets\n"); | |
} | |
if (adapter->wol & E1000_WUFC_EX) | |
@@ -1823,6 +1929,8 @@ | |
switch (state) { | |
case ETHTOOL_ID_ACTIVE: | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
if (!hw->mac.ops.blink_led) | |
return 2; /* cycle on/off twice per second */ | |
@@ -1834,16 +1942,18 @@ | |
e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); | |
hw->mac.ops.led_off(hw); | |
hw->mac.ops.cleanup_led(hw); | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
break; | |
case ETHTOOL_ID_ON: | |
- adapter->hw.mac.ops.led_on(&adapter->hw); | |
+ hw->mac.ops.led_on(hw); | |
break; | |
case ETHTOOL_ID_OFF: | |
- adapter->hw.mac.ops.led_off(&adapter->hw); | |
+ hw->mac.ops.led_off(hw); | |
break; | |
} | |
+ | |
return 0; | |
} | |
@@ -1864,7 +1974,6 @@ | |
struct ethtool_coalesce *ec) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
- struct e1000_hw *hw = &adapter->hw; | |
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || | |
((ec->rx_coalesce_usecs > 4) && | |
@@ -1873,7 +1982,8 @@ | |
return -EINVAL; | |
if (ec->rx_coalesce_usecs == 4) { | |
- adapter->itr = adapter->itr_setting = 4; | |
+ adapter->itr_setting = 4; | |
+ adapter->itr = adapter->itr_setting; | |
} else if (ec->rx_coalesce_usecs <= 3) { | |
adapter->itr = 20000; | |
adapter->itr_setting = ec->rx_coalesce_usecs; | |
@@ -1882,10 +1992,14 @@ | |
adapter->itr_setting = adapter->itr & ~3; | |
} | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
if (adapter->itr_setting != 0) | |
- ew32(ITR, 1000000000 / (adapter->itr * 256)); | |
+ e1000e_write_itr(adapter, adapter->itr); | |
else | |
- ew32(ITR, 0); | |
+ e1000e_write_itr(adapter, 0); | |
+ | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
return 0; | |
} | |
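
The replaced write in this hunk, ew32(ITR, 1000000000 / (adapter->itr * 256)), converts an interrupt rate in interrupts per second into the ITR register's interval units (256 ns per count, as implied by the formula); the new code performs the same conversion behind e1000e_write_itr(). A tiny worked example of that arithmetic:

/* Interrupt-rate to ITR-register conversion used by the driver: the register
 * counts the minimum inter-interrupt gap in 256 ns units. */
#include <stdio.h>

static unsigned int itr_reg_from_rate(unsigned int ints_per_sec)
{
        return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
        /* For the 20000 interrupts/s value the hunk assigns when
         * rx_coalesce_usecs <= 3: 1e9 / (20000 * 256) = 195 units,
         * i.e. a minimum gap of roughly 50 us between interrupts. */
        printf("ITR for 20000 ints/s: %u\n", itr_reg_from_rate(20000));
        return 0;
}
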
@@ -1900,13 +2014,15 @@ | |
if (!adapter->hw.mac.autoneg) | |
return -EINVAL; | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
e1000e_reinit_locked(adapter); | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
return 0; | |
} | |
static void e1000_get_ethtool_stats(struct net_device *netdev, | |
- struct ethtool_stats *stats, | |
+ struct ethtool_stats __always_unused *stats, | |
u64 *data) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -1914,16 +2030,21 @@ | |
int i; | |
char *p = NULL; | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
e1000e_get_stats64(netdev, &net_stats); | |
+ | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+ | |
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | |
switch (e1000_gstrings_stats[i].type) { | |
case NETDEV_STATS: | |
- p = (char *) &net_stats + | |
- e1000_gstrings_stats[i].stat_offset; | |
+ p = (char *)&net_stats + | |
+ e1000_gstrings_stats[i].stat_offset; | |
break; | |
case E1000_STATS: | |
- p = (char *) adapter + | |
- e1000_gstrings_stats[i].stat_offset; | |
+ p = (char *)adapter + | |
+ e1000_gstrings_stats[i].stat_offset; | |
break; | |
default: | |
data[i] = 0; | |
@@ -1931,12 +2052,12 @@ | |
} | |
data[i] = (e1000_gstrings_stats[i].sizeof_stat == | |
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | |
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | |
} | |
} | |
-static void e1000_get_strings(struct net_device *netdev, u32 stringset, | |
- u8 *data) | |
+static void e1000_get_strings(struct net_device __always_unused *netdev, | |
+ u32 stringset, u8 *data) | |
{ | |
u8 *p = data; | |
int i; | |
@@ -1955,6 +2076,211 @@ | |
} | |
} | |
+static int e1000_get_rxnfc(struct net_device *netdev, | |
+ struct ethtool_rxnfc *info, | |
+ u32 __always_unused *rule_locs) | |
+{ | |
+ info->data = 0; | |
+ | |
+ switch (info->cmd) { | |
+ case ETHTOOL_GRXFH: { | |
+ struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u32 mrqc; | |
+ | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ mrqc = er32(MRQC); | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+ | |
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) | |
+ return 0; | |
+ | |
+ switch (info->flow_type) { | |
+ case TCP_V4_FLOW: | |
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) | |
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; | |
+ /* fall through */ | |
+ case UDP_V4_FLOW: | |
+ case SCTP_V4_FLOW: | |
+ case AH_ESP_V4_FLOW: | |
+ case IPV4_FLOW: | |
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) | |
+ info->data |= RXH_IP_SRC | RXH_IP_DST; | |
+ break; | |
+ case TCP_V6_FLOW: | |
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) | |
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; | |
+ /* fall through */ | |
+ case UDP_V6_FLOW: | |
+ case SCTP_V6_FLOW: | |
+ case AH_ESP_V6_FLOW: | |
+ case IPV6_FLOW: | |
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) | |
+ info->data |= RXH_IP_SRC | RXH_IP_DST; | |
+ break; | |
+ default: | |
+ break; | |
+ } | |
+ return 0; | |
+ } | |
+ default: | |
+ return -EOPNOTSUPP; | |
+ } | |
+} | |
+ | |
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) | |
+{ | |
+ struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data; | |
+ u32 ret_val; | |
+ | |
+ if (!(adapter->flags2 & FLAG2_HAS_EEE)) | |
+ return -EOPNOTSUPP; | |
+ | |
+ switch (hw->phy.type) { | |
+ case e1000_phy_82579: | |
+ cap_addr = I82579_EEE_CAPABILITY; | |
+ lpa_addr = I82579_EEE_LP_ABILITY; | |
+ pcs_stat_addr = I82579_EEE_PCS_STATUS; | |
+ break; | |
+ case e1000_phy_i217: | |
+ cap_addr = I217_EEE_CAPABILITY; | |
+ lpa_addr = I217_EEE_LP_ABILITY; | |
+ pcs_stat_addr = I217_EEE_PCS_STATUS; | |
+ break; | |
+ default: | |
+ return -EOPNOTSUPP; | |
+ } | |
+ | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) { | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+ return -EBUSY; | |
+ } | |
+ | |
+ /* EEE Capability */ | |
+ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); | |
+ if (ret_val) | |
+ goto release; | |
+ edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); | |
+ | |
+ /* EEE Advertised */ | |
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); | |
+ | |
+ /* EEE Link Partner Advertised */ | |
+ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); | |
+ if (ret_val) | |
+ goto release; | |
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); | |
+ | |
+ /* EEE PCS Status */ | |
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); | |
+ if (ret_val) | |
+ goto release; | |
+ if (hw->phy.type == e1000_phy_82579) | |
+ phy_data <<= 8; | |
+ | |
+ /* Result of the EEE auto negotiation - there is no register that | |
+ * has the status of the EEE negotiation so do a best-guess based | |
+ * on whether Tx or Rx LPI indications have been received. | |
+ */ | |
+ if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD)) | |
+ edata->eee_active = true; | |
+ | |
+ edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; | |
+ edata->tx_lpi_enabled = true; | |
+ edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; | |
+ | |
+release: | |
+ hw->phy.ops.release(hw); | |
+ if (ret_val) | |
+ ret_val = -ENODATA; | |
+ | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+ | |
+ return ret_val; | |
+} | |
+ | |
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) | |
+{ | |
+ struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ struct ethtool_eee eee_curr; | |
+ s32 ret_val; | |
+ | |
+ ret_val = e1000e_get_eee(netdev, &eee_curr); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { | |
+ e_err("Setting EEE tx-lpi is not supported\n"); | |
+ return -EINVAL; | |
+ } | |
+ | |
+ if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) { | |
+ e_err("Setting EEE Tx LPI timer is not supported\n"); | |
+ return -EINVAL; | |
+ } | |
+ | |
+ if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { | |
+ e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); | |
+ return -EINVAL; | |
+ } | |
+ | |
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); | |
+ | |
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; | |
+ | |
+ pm_runtime_get_sync(netdev->dev.parent); | |
+ | |
+ /* reset the link */ | |
+ if (netif_running(netdev)) | |
+ e1000e_reinit_locked(adapter); | |
+ else | |
+ e1000e_reset(adapter); | |
+ | |
+ pm_runtime_put_sync(netdev->dev.parent); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static int e1000e_get_ts_info(struct net_device *netdev, | |
+ struct ethtool_ts_info *info) | |
+{ | |
+ struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ | |
+ ethtool_op_get_ts_info(netdev, info); | |
+ | |
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) | |
+ return 0; | |
+ | |
+ info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | | |
+ SOF_TIMESTAMPING_RX_HARDWARE | | |
+ SOF_TIMESTAMPING_RAW_HARDWARE); | |
+ | |
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); | |
+ | |
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | | |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | | |
+ (1 << HWTSTAMP_FILTER_ALL)); | |
+ | |
+ if (adapter->ptp_clock) | |
+ info->phc_index = ptp_clock_index(adapter->ptp_clock); | |
+ | |
+ return 0; | |
+} | |
+ | |
static const struct ethtool_ops e1000_ethtool_ops = { | |
.get_settings = e1000_get_settings, | |
.set_settings = e1000_set_settings, | |
@@ -1981,6 +2307,10 @@ | |
.get_sset_count = e1000e_get_sset_count, | |
.get_coalesce = e1000_get_coalesce, | |
.set_coalesce = e1000_set_coalesce, | |
+ .get_rxnfc = e1000_get_rxnfc, | |
+ .get_ts_info = e1000e_get_ts_info, | |
+ .get_eee = e1000e_get_eee, | |
+ .set_eee = e1000e_set_eee, | |
}; | |
void e1000e_set_ethtool_ops(struct net_device *netdev) | |
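
Almost every ethtool callback touched in this file gains a pm_runtime_get_sync()/pm_runtime_put_sync() pair around the code that touches the hardware, so a runtime-suspended NIC is resumed before its registers are accessed and released again afterwards. The schematic below shows only that bracketing pattern; mock_runtime_get()/mock_runtime_put() are local stand-ins for the real pm_runtime calls, and do_register_work() is a placeholder for the register accesses.

/* User-space schematic of the get/put bracketing added to the ethtool hooks.
 * mock_runtime_get()/mock_runtime_put() stand in for pm_runtime_get_sync()
 * and pm_runtime_put_sync(); only the pairing is the point of the sketch. */
#include <stdio.h>

struct fake_device { int usage_count; };

static void mock_runtime_get(struct fake_device *dev)
{
        if (dev->usage_count++ == 0)
                printf("resume device before touching registers\n");
}

static void mock_runtime_put(struct fake_device *dev)
{
        if (--dev->usage_count == 0)
                printf("device may runtime-suspend again\n");
}

static void do_register_work(void)
{
        printf("  ... register reads/writes, as in e1000_get_regs() ...\n");
}

static int ethtool_style_callback(struct fake_device *dev)
{
        mock_runtime_get(dev);   /* like pm_runtime_get_sync(netdev->dev.parent) */
        do_register_work();
        mock_runtime_put(dev);   /* like pm_runtime_put_sync(netdev->dev.parent) */
        return 0;
}

int main(void)
{
        struct fake_device dev = { 0 };

        return ethtool_style_callback(&dev);
}
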
diff -ru e1000e/hw.h /home/arch/linux/drivers/net/ethernet/intel/e1000e/hw.h | |
--- e1000e/hw.h 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/hw.h 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -29,319 +29,10 @@ | |
#ifndef _E1000_HW_H_ | |
#define _E1000_HW_H_ | |
-#include <linux/types.h> | |
- | |
-struct e1000_hw; | |
-struct e1000_adapter; | |
- | |
+#include "regs.h" | |
#include "defines.h" | |
-#define er32(reg) __er32(hw, E1000_##reg) | |
-#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) | |
-#define e1e_flush() er32(STATUS) | |
- | |
-#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ | |
- (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) | |
- | |
-#define E1000_READ_REG_ARRAY(a, reg, offset) \ | |
- (readl((a)->hw_addr + reg + ((offset) << 2))) | |
- | |
-enum e1e_registers { | |
- E1000_CTRL = 0x00000, /* Device Control - RW */ | |
- E1000_STATUS = 0x00008, /* Device Status - RO */ | |
- E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ | |
- E1000_EERD = 0x00014, /* EEPROM Read - RW */ | |
- E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ | |
- E1000_FLA = 0x0001C, /* Flash Access - RW */ | |
- E1000_MDIC = 0x00020, /* MDI Control - RW */ | |
- E1000_SCTL = 0x00024, /* SerDes Control - RW */ | |
- E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ | |
- E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ | |
- E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ | |
- E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ | |
- E1000_FCT = 0x00030, /* Flow Control Type - RW */ | |
- E1000_VET = 0x00038, /* VLAN Ether Type - RW */ | |
- E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ | |
- E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ | |
- E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ | |
- E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ | |
- E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ | |
- E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */ | |
- E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ | |
- E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */ | |
- E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */ | |
-#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2)) | |
- E1000_RCTL = 0x00100, /* Rx Control - RW */ | |
- E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ | |
- E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ | |
- E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ | |
- E1000_TCTL = 0x00400, /* Tx Control - RW */ | |
- E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ | |
- E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ | |
- E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ | |
- E1000_LEDCTL = 0x00E00, /* LED Control - RW */ | |
- E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ | |
- E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ | |
- E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ | |
-#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ | |
- E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ | |
- E1000_PBS = 0x01008, /* Packet Buffer Size */ | |
- E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ | |
- E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ | |
- E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ | |
- E1000_PBA_ECC = 0x01100, /* PBA ECC Register */ | |
- E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ | |
- E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ | |
- E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ | |
- E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ | |
- E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ | |
- E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ | |
- E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ | |
- E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ | |
- E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ | |
- E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ | |
- E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ | |
-#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) | |
- E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ | |
- | |
-/* Convenience macros | |
- * | |
- * Note: "_n" is the queue number of the register to be written to. | |
- * | |
- * Example usage: | |
- * E1000_RDBAL_REG(current_rx_queue) | |
- * | |
- */ | |
-#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) | |
- E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ | |
- E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ | |
- E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ | |
- E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ | |
- E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ | |
- E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ | |
- E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ | |
- E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ | |
-#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) | |
- E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ | |
- E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */ | |
-#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8)) | |
- E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ | |
- E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ | |
- E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ | |
- E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ | |
- E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ | |
- E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ | |
- E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ | |
- E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ | |
- E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ | |
- E1000_COLC = 0x04028, /* Collision Count - R/clr */ | |
- E1000_DC = 0x04030, /* Defer Count - R/clr */ | |
- E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ | |
- E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ | |
- E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ | |
- E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ | |
- E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ | |
- E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ | |
- E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ | |
- E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ | |
- E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ | |
- E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ | |
- E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ | |
- E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ | |
- E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */ | |
- E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ | |
- E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ | |
- E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */ | |
- E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ | |
- E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ | |
- E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ | |
- E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ | |
- E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ | |
- E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ | |
- E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ | |
- E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ | |
- E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */ | |
- E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ | |
- E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ | |
- E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ | |
- E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ | |
- E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ | |
- E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ | |
- E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ | |
- E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ | |
- E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */ | |
- E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ | |
- E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ | |
- E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ | |
- E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ | |
- E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ | |
- E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ | |
- E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ | |
- E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ | |
- E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ | |
- E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ | |
- E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ | |
- E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ | |
- E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ | |
- E1000_IAC = 0x04100, /* Interrupt Assertion Count */ | |
- E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ | |
- E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ | |
- E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ | |
- E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ | |
- E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ | |
- E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ | |
- E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ | |
- E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ | |
- E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ | |
- E1000_RFCTL = 0x05008, /* Receive Filter Control */ | |
- E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ | |
- E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */ | |
-#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8)) | |
-#define E1000_RA (E1000_RAL(0)) | |
- E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ | |
-#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) | |
- E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ | |
- E1000_WUC = 0x05800, /* Wakeup Control - RW */ | |
- E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ | |
- E1000_WUS = 0x05810, /* Wakeup Status - RO */ | |
- E1000_MANC = 0x05820, /* Management Control - RW */ | |
- E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ | |
- E1000_HOST_IF = 0x08800, /* Host Interface */ | |
- | |
- E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ | |
- E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ | |
- E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */ | |
-#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4)) | |
- E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ | |
- E1000_GCR = 0x05B00, /* PCI-Ex Control */ | |
- E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ | |
- E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ | |
- E1000_SWSM = 0x05B50, /* SW Semaphore */ | |
- E1000_FWSM = 0x05B54, /* FW Semaphore */ | |
- E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ | |
- E1000_FFLT_DBG = 0x05F04, /* Debug Register */ | |
- E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ | |
-#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) | |
-#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE | |
- E1000_HICR = 0x08F00, /* Host Interface Control */ | |
-}; | |
- | |
-#define E1000_MAX_PHY_ADDR 4 | |
- | |
-/* IGP01E1000 Specific Registers */ | |
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ | |
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ | |
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ | |
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ | |
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ | |
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ | |
-#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ | |
-#define IGP_PAGE_SHIFT 5 | |
-#define PHY_REG_MASK 0x1F | |
- | |
-#define BM_WUC_PAGE 800 | |
-#define BM_WUC_ADDRESS_OPCODE 0x11 | |
-#define BM_WUC_DATA_OPCODE 0x12 | |
-#define BM_WUC_ENABLE_PAGE 769 | |
-#define BM_WUC_ENABLE_REG 17 | |
-#define BM_WUC_ENABLE_BIT (1 << 2) | |
-#define BM_WUC_HOST_WU_BIT (1 << 4) | |
-#define BM_WUC_ME_WU_BIT (1 << 5) | |
- | |
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) | |
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) | |
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) | |
- | |
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 | |
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078 | |
- | |
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 | |
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ | |
- | |
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 | |
- | |
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ | |
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ | |
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ | |
- | |
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 | |
- | |
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 | |
-#define IGP01E1000_PSSR_MDIX 0x0800 | |
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000 | |
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 | |
- | |
-#define IGP02E1000_PHY_CHANNEL_NUM 4 | |
-#define IGP02E1000_PHY_AGC_A 0x11B1 | |
-#define IGP02E1000_PHY_AGC_B 0x12B1 | |
-#define IGP02E1000_PHY_AGC_C 0x14B1 | |
-#define IGP02E1000_PHY_AGC_D 0x18B1 | |
- | |
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ | |
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F | |
-#define IGP02E1000_AGC_RANGE 15 | |
- | |
-/* manage.c */ | |
-#define E1000_VFTA_ENTRY_SHIFT 5 | |
-#define E1000_VFTA_ENTRY_MASK 0x7F | |
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F | |
- | |
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */ | |
-/* Driver sets this bit when done to put command in RAM */ | |
-#define E1000_HICR_C 0x02 | |
-#define E1000_HICR_FW_RESET_ENABLE 0x40 | |
-#define E1000_HICR_FW_RESET 0x80 | |
- | |
-#define E1000_FWSM_MODE_MASK 0xE | |
-#define E1000_FWSM_MODE_SHIFT 1 | |
- | |
-#define E1000_MNG_IAMT_MODE 0x3 | |
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 | |
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 | |
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 | |
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 | |
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 | |
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 | |
- | |
-/* nvm.c */ | |
-#define E1000_STM_OPCODE 0xDB00 | |
- | |
-#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 | |
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 | |
-#define E1000_KMRNCTRLSTA_REN 0x00200000 | |
-#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ | |
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ | |
-#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ | |
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ | |
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ | |
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ | |
-#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 | |
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 | |
-#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ | |
- | |
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 | |
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ | |
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ | |
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ | |
- | |
-/* IFE PHY Extended Status Control */ | |
-#define IFE_PESC_POLARITY_REVERSED 0x0100 | |
- | |
-/* IFE PHY Special Control */ | |
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 | |
-#define IFE_PSC_FORCE_POLARITY 0x0020 | |
- | |
-/* IFE PHY Special Control and LED Control */ | |
-#define IFE_PSCL_PROBE_MODE 0x0020 | |
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ | |
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ | |
- | |
-/* IFE PHY MDIX Control */ | |
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ | |
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ | |
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ | |
- | |
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF | |
+struct e1000_hw; | |
#define E1000_DEV_ID_82571EB_COPPER 0x105E | |
#define E1000_DEV_ID_82571EB_FIBER 0x105F | |
@@ -361,13 +52,11 @@ | |
#define E1000_DEV_ID_82573L 0x109A | |
#define E1000_DEV_ID_82574L 0x10D3 | |
#define E1000_DEV_ID_82574LA 0x10F6 | |
-#define E1000_DEV_ID_82583V 0x150C | |
- | |
+#define E1000_DEV_ID_82583V 0x150C | |
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 | |
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 | |
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA | |
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB | |
- | |
#define E1000_DEV_ID_ICH8_82567V_3 0x1501 | |
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 | |
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | |
@@ -397,13 +86,21 @@ | |
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 | |
#define E1000_DEV_ID_PCH2_LV_LM 0x1502 | |
#define E1000_DEV_ID_PCH2_LV_V 0x1503 | |
+#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A | |
+#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B | |
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A | |
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 | |
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 | |
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1 | |
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ | |
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ | |
-#define E1000_REVISION_4 4 | |
+#define E1000_REVISION_4 4 | |
-#define E1000_FUNC_1 1 | |
+#define E1000_FUNC_1 1 | |
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 | |
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 | |
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 | |
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 | |
enum e1000_mac_type { | |
e1000_82571, | |
@@ -417,6 +114,7 @@ | |
e1000_ich10lan, | |
e1000_pchlan, | |
e1000_pch2lan, | |
+ e1000_pch_lpt, | |
}; | |
enum e1000_media_type { | |
@@ -454,6 +152,7 @@ | |
e1000_phy_82578, | |
e1000_phy_82577, | |
e1000_phy_82579, | |
+ e1000_phy_i217, | |
}; | |
enum e1000_bus_width { | |
@@ -472,7 +171,7 @@ | |
e1000_1000t_rx_status_undefined = 0xFF | |
}; | |
-enum e1000_rev_polarity{ | |
+enum e1000_rev_polarity { | |
e1000_rev_polarity_normal = 0, | |
e1000_rev_polarity_reversed, | |
e1000_rev_polarity_undefined = 0xFF | |
@@ -506,16 +205,6 @@ | |
e1000_serdes_link_forced_up | |
}; | |
-/* Receive Descriptor */ | |
-struct e1000_rx_desc { | |
- __le64 buffer_addr; /* Address of the descriptor's data buffer */ | |
- __le16 length; /* Length of data DMAed into data buffer */ | |
- __le16 csum; /* Packet checksum */ | |
- u8 status; /* Descriptor status */ | |
- u8 errors; /* Descriptor Errors */ | |
- __le16 special; | |
-}; | |
- | |
/* Receive Descriptor - Extended */ | |
union e1000_rx_desc_extended { | |
struct { | |
@@ -542,6 +231,10 @@ | |
}; | |
#define MAX_PS_BUFFERS 4 | |
+ | |
+/* Number of packet split data buffers (not including the header buffer) */ | |
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) | |
+ | |
/* Receive Descriptor - Packet Split */ | |
union e1000_rx_desc_packet_split { | |
struct { | |
@@ -566,7 +259,8 @@ | |
} middle; | |
struct { | |
__le16 header_status; | |
- __le16 length[3]; /* length of buffers 1-3 */ | |
+ /* length of buffers 1-3 */ | |
+ __le16 length[PS_PAGE_BUFFERS]; | |
} upper; | |
__le64 reserved; | |
} wb; /* writeback */ | |
@@ -638,7 +332,7 @@ | |
struct { | |
u8 status; /* Descriptor status */ | |
u8 popts; /* Packet Options */ | |
- __le16 special; /* */ | |
+ __le16 special; | |
} fields; | |
} upper; | |
}; | |
@@ -717,13 +411,13 @@ | |
struct e1000_host_mng_dhcp_cookie { | |
u32 signature; | |
- u8 status; | |
- u8 reserved0; | |
+ u8 status; | |
+ u8 reserved0; | |
u16 vlan_id; | |
u32 reserved1; | |
u16 reserved2; | |
- u8 reserved3; | |
- u8 checksum; | |
+ u8 reserved3; | |
+ u8 checksum; | |
}; | |
/* Host Interface "Rev 1" */ | |
@@ -734,7 +428,7 @@ | |
u8 checksum; | |
}; | |
-#define E1000_HI_MAX_DATA_LENGTH 252 | |
+#define E1000_HI_MAX_DATA_LENGTH 252 | |
struct e1000_host_command_info { | |
struct e1000_host_command_header command_header; | |
u8 command_data[E1000_HI_MAX_DATA_LENGTH]; | |
@@ -742,20 +436,25 @@ | |
/* Host Interface "Rev 2" */ | |
struct e1000_host_mng_command_header { | |
- u8 command_id; | |
- u8 checksum; | |
+ u8 command_id; | |
+ u8 checksum; | |
u16 reserved1; | |
u16 reserved2; | |
u16 command_length; | |
}; | |
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 | |
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 | |
struct e1000_host_mng_command_info { | |
struct e1000_host_mng_command_header command_header; | |
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; | |
}; | |
-/* Function pointers and static data for the MAC. */ | |
+#include "mac.h" | |
+#include "phy.h" | |
+#include "nvm.h" | |
+#include "manage.h" | |
+ | |
+/* Function pointers for the MAC. */ | |
struct e1000_mac_operations { | |
s32 (*id_led_init)(struct e1000_hw *); | |
s32 (*blink_led)(struct e1000_hw *); | |
@@ -776,11 +475,12 @@ | |
s32 (*setup_physical_interface)(struct e1000_hw *); | |
s32 (*setup_led)(struct e1000_hw *); | |
void (*write_vfta)(struct e1000_hw *, u32, u32); | |
+ void (*config_collision_dist)(struct e1000_hw *); | |
+ void (*rar_set)(struct e1000_hw *, u8 *, u32); | |
s32 (*read_mac_addr)(struct e1000_hw *); | |
}; | |
-/* | |
- * When to use various PHY register access functions: | |
+/* When to use various PHY register access functions: | |
* | |
* Func Caller | |
* Function Does Does When to use | |
@@ -824,6 +524,7 @@ | |
s32 (*acquire)(struct e1000_hw *); | |
s32 (*read)(struct e1000_hw *, u16, u16, u16 *); | |
void (*release)(struct e1000_hw *); | |
+ void (*reload)(struct e1000_hw *); | |
s32 (*update)(struct e1000_hw *); | |
s32 (*valid_led_default)(struct e1000_hw *, u16 *); | |
s32 (*validate)(struct e1000_hw *); | |
@@ -853,11 +554,11 @@ | |
u16 mta_reg_count; | |
/* Maximum size of the MTA register table in all supported adapters */ | |
- #define MAX_MTA_REG 128 | |
+#define MAX_MTA_REG 128 | |
u32 mta_shadow[MAX_MTA_REG]; | |
u16 rar_entry_count; | |
- u8 forced_speed_duplex; | |
+ u8 forced_speed_duplex; | |
bool adaptive_ifs; | |
bool has_fwsm; | |
@@ -885,7 +586,7 @@ | |
u32 addr; | |
u32 id; | |
- u32 reset_delay_us; /* in usec */ | |
+ u32 reset_delay_us; /* in usec */ | |
u32 revision; | |
enum e1000_media_type media_type; | |
@@ -944,11 +645,11 @@ | |
}; | |
struct e1000_dev_spec_80003es2lan { | |
- bool mdic_wa_enable; | |
+ bool mdic_wa_enable; | |
}; | |
struct e1000_shadow_ram { | |
- u16 value; | |
+ u16 value; | |
bool modified; | |
}; | |
@@ -959,26 +660,31 @@ | |
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; | |
bool nvm_k1_enabled; | |
bool eee_disable; | |
+ u16 eee_lp_ability; | |
}; | |
struct e1000_hw { | |
struct e1000_adapter *adapter; | |
- u8 __iomem *hw_addr; | |
- u8 __iomem *flash_address; | |
+ void __iomem *hw_addr; | |
+ void __iomem *flash_address; | |
- struct e1000_mac_info mac; | |
- struct e1000_fc_info fc; | |
- struct e1000_phy_info phy; | |
- struct e1000_nvm_info nvm; | |
- struct e1000_bus_info bus; | |
+ struct e1000_mac_info mac; | |
+ struct e1000_fc_info fc; | |
+ struct e1000_phy_info phy; | |
+ struct e1000_nvm_info nvm; | |
+ struct e1000_bus_info bus; | |
struct e1000_host_mng_dhcp_cookie mng_cookie; | |
union { | |
- struct e1000_dev_spec_82571 e82571; | |
+ struct e1000_dev_spec_82571 e82571; | |
struct e1000_dev_spec_80003es2lan e80003es2lan; | |
- struct e1000_dev_spec_ich8lan ich8lan; | |
+ struct e1000_dev_spec_ich8lan ich8lan; | |
} dev_spec; | |
}; | |
+#include "82571.h" | |
+#include "80003es2lan.h" | |
+#include "ich8lan.h" | |
+ | |
#endif | |
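For context on the hw.h reorganization in the hunks above: the register offset enum moves out to regs.h, the er32()/ew32() accessor macros leave this header, and hw_addr/flash_address become plain void __iomem * pointers. The sketch below is a simplified illustration only, assuming the driver's struct e1000_hw from its own headers; the example_* names are hypothetical, and the real __er32()/__ew32() helpers live elsewhere in the driver and may carry extra workaround checks. It shows what those accessors essentially reduce to:

    #include <linux/io.h>
    #include "e1000.h"   /* driver umbrella header providing struct e1000_hw */

    /* Simplified equivalents of the accessors formerly defined in hw.h */
    static inline u32 example_er32(struct e1000_hw *hw, unsigned long reg)
    {
            return readl(hw->hw_addr + reg);        /* e.g. reg == E1000_STATUS */
    }

    static inline void example_ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
    {
            writel(val, hw->hw_addr + reg);
    }

Keeping only struct definitions and device IDs in hw.h, and pulling in mac.h/phy.h/nvm.h/manage.h at the end, matches how the other per-family headers (82571.h, 80003es2lan.h, ich8lan.h) are included in the newer tree.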
diff -ru e1000e/ich8lan.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/ich8lan.c | |
--- e1000e/ich8lan.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/ich8lan.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -26,8 +26,7 @@ | |
*******************************************************************************/ | |
-/* | |
- * 82562G 10/100 Network Connection | |
+/* 82562G 10/100 Network Connection | |
* 82562G-2 10/100 Network Connection | |
* 82562GT 10/100 Network Connection | |
* 82562GT-2 10/100 Network Connection | |
@@ -58,130 +57,19 @@ | |
#include "e1000.h" | |
-#define ICH_FLASH_GFPREG 0x0000 | |
-#define ICH_FLASH_HSFSTS 0x0004 | |
-#define ICH_FLASH_HSFCTL 0x0006 | |
-#define ICH_FLASH_FADDR 0x0008 | |
-#define ICH_FLASH_FDATA0 0x0010 | |
-#define ICH_FLASH_PR0 0x0074 | |
- | |
-#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 | |
-#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 | |
-#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 | |
-#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF | |
-#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 | |
- | |
-#define ICH_CYCLE_READ 0 | |
-#define ICH_CYCLE_WRITE 2 | |
-#define ICH_CYCLE_ERASE 3 | |
- | |
-#define FLASH_GFPREG_BASE_MASK 0x1FFF | |
-#define FLASH_SECTOR_ADDR_SHIFT 12 | |
- | |
-#define ICH_FLASH_SEG_SIZE_256 256 | |
-#define ICH_FLASH_SEG_SIZE_4K 4096 | |
-#define ICH_FLASH_SEG_SIZE_8K 8192 | |
-#define ICH_FLASH_SEG_SIZE_64K 65536 | |
- | |
- | |
-#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ | |
-/* FW established a valid mode */ | |
-#define E1000_ICH_FWSM_FW_VALID 0x00008000 | |
- | |
-#define E1000_ICH_MNG_IAMT_MODE 0x2 | |
- | |
-#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ | |
- (ID_LED_DEF1_OFF2 << 8) | \ | |
- (ID_LED_DEF1_ON2 << 4) | \ | |
- (ID_LED_DEF1_DEF2)) | |
- | |
-#define E1000_ICH_NVM_SIG_WORD 0x13 | |
-#define E1000_ICH_NVM_SIG_MASK 0xC000 | |
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 | |
-#define E1000_ICH_NVM_SIG_VALUE 0x80 | |
- | |
-#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 | |
- | |
-#define E1000_FEXTNVM_SW_CONFIG 1 | |
-#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ | |
- | |
-#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 | |
-#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 | |
-#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 | |
- | |
-#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL | |
- | |
-#define E1000_ICH_RAR_ENTRIES 7 | |
- | |
-#define PHY_PAGE_SHIFT 5 | |
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ | |
- ((reg) & MAX_PHY_REG_ADDRESS)) | |
-#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ | |
-#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ | |
- | |
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 | |
-#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 | |
-#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 | |
- | |
-#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ | |
- | |
-#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ | |
- | |
-/* SMBus Address Phy Register */ | |
-#define HV_SMB_ADDR PHY_REG(768, 26) | |
-#define HV_SMB_ADDR_MASK 0x007F | |
-#define HV_SMB_ADDR_PEC_EN 0x0200 | |
-#define HV_SMB_ADDR_VALID 0x0080 | |
- | |
-/* PHY Power Management Control */ | |
-#define HV_PM_CTRL PHY_REG(770, 17) | |
- | |
-/* PHY Low Power Idle Control */ | |
-#define I82579_LPI_CTRL PHY_REG(772, 20) | |
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 | |
-#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 | |
- | |
-/* EMI Registers */ | |
-#define I82579_EMI_ADDR 0x10 | |
-#define I82579_EMI_DATA 0x11 | |
-#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ | |
- | |
-/* Strapping Option Register - RO */ | |
-#define E1000_STRAP 0x0000C | |
-#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 | |
-#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 | |
- | |
-/* OEM Bits Phy Register */ | |
-#define HV_OEM_BITS PHY_REG(768, 25) | |
-#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ | |
-#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ | |
-#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ | |
- | |
-#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ | |
-#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ | |
- | |
-/* KMRN Mode Control */ | |
-#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) | |
-#define HV_KMRN_MDIO_SLOW 0x0400 | |
- | |
-/* KMRN FIFO Control and Status */ | |
-#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) | |
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 | |
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 | |
- | |
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | |
/* Offset 04h HSFSTS */ | |
union ich8_hws_flash_status { | |
struct ich8_hsfsts { | |
- u16 flcdone :1; /* bit 0 Flash Cycle Done */ | |
- u16 flcerr :1; /* bit 1 Flash Cycle Error */ | |
- u16 dael :1; /* bit 2 Direct Access error Log */ | |
- u16 berasesz :2; /* bit 4:3 Sector Erase Size */ | |
- u16 flcinprog :1; /* bit 5 flash cycle in Progress */ | |
- u16 reserved1 :2; /* bit 13:6 Reserved */ | |
- u16 reserved2 :6; /* bit 13:6 Reserved */ | |
- u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ | |
- u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ | |
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */ | |
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */ | |
+ u16 dael:1; /* bit 2 Direct Access error Log */ | |
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */ | |
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */ | |
+ u16 reserved1:2; /* bit 13:6 Reserved */ | |
+ u16 reserved2:6; /* bit 13:6 Reserved */ | |
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ | |
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ | |
} hsf_status; | |
u16 regval; | |
}; | |
@@ -190,11 +78,11 @@ | |
/* Offset 06h FLCTL */ | |
union ich8_hws_flash_ctrl { | |
struct ich8_hsflctl { | |
- u16 flcgo :1; /* 0 Flash Cycle Go */ | |
- u16 flcycle :2; /* 2:1 Flash Cycle */ | |
- u16 reserved :5; /* 7:3 Reserved */ | |
- u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ | |
- u16 flockdn :6; /* 15:10 Reserved */ | |
+ u16 flcgo:1; /* 0 Flash Cycle Go */ | |
+ u16 flcycle:2; /* 2:1 Flash Cycle */ | |
+ u16 reserved:5; /* 7:3 Reserved */ | |
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ | |
+ u16 flockdn:6; /* 15:10 Reserved */ | |
} hsf_ctrl; | |
u16 regval; | |
}; | |
@@ -202,10 +90,10 @@ | |
/* ICH Flash Region Access Permissions */ | |
union ich8_hws_flash_regacc { | |
struct ich8_flracc { | |
- u32 grra :8; /* 0:7 GbE region Read Access */ | |
- u32 grwa :8; /* 8:15 GbE region Write Access */ | |
- u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ | |
- u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ | |
+ u32 grra:8; /* 0:7 GbE region Read Access */ | |
+ u32 grwa:8; /* 8:15 GbE region Write Access */ | |
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ | |
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ | |
} hsf_flregacc; | |
u16 regval; | |
}; | |
@@ -213,17 +101,16 @@ | |
/* ICH Flash Protected Region */ | |
union ich8_flash_protected_range { | |
struct ich8_pr { | |
- u32 base:13; /* 0:12 Protected Range Base */ | |
- u32 reserved1:2; /* 13:14 Reserved */ | |
- u32 rpe:1; /* 15 Read Protection Enable */ | |
- u32 limit:13; /* 16:28 Protected Range Limit */ | |
- u32 reserved2:2; /* 29:30 Reserved */ | |
- u32 wpe:1; /* 31 Write Protection Enable */ | |
+ u32 base:13; /* 0:12 Protected Range Base */ | |
+ u32 reserved1:2; /* 13:14 Reserved */ | |
+ u32 rpe:1; /* 15 Read Protection Enable */ | |
+ u32 limit:13; /* 16:28 Protected Range Limit */ | |
+ u32 reserved2:2; /* 29:30 Reserved */ | |
+ u32 wpe:1; /* 31 Write Protection Enable */ | |
} range; | |
u32 regval; | |
}; | |
-static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); | |
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); | |
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); | |
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); | |
@@ -235,9 +122,7 @@ | |
u16 *data); | |
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |
u8 size, u16 *data); | |
-static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); | |
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); | |
-static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); | |
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); | |
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); | |
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); | |
@@ -249,12 +134,15 @@ | |
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); | |
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); | |
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); | |
-static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | |
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | |
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); | |
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); | |
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); | |
+static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); | |
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); | |
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); | |
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); | |
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); | |
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) | |
{ | |
@@ -278,80 +166,198 @@ | |
#define er16flash(reg) __er16flash(hw, (reg)) | |
#define er32flash(reg) __er32flash(hw, (reg)) | |
-#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) | |
-#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) | |
+#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) | |
+#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) | |
-static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) | |
+/** | |
+ * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers | |
+ * @hw: pointer to the HW structure | |
+ * | |
+ * Test access to the PHY registers by reading the PHY ID registers. If | |
+ * the PHY ID is already known (e.g. resume path) compare it with known ID, | |
+ * otherwise assume the read PHY ID is correct if it is valid. | |
+ * | |
+ * Assumes the sw/fw/hw semaphore is already acquired. | |
+ **/ | |
+static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) | |
{ | |
- u32 ctrl; | |
+ u16 phy_reg = 0; | |
+ u32 phy_id = 0; | |
+ s32 ret_val; | |
+ u16 retry_count; | |
+ u32 mac_reg = 0; | |
- ctrl = er32(CTRL); | |
- ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; | |
- ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; | |
- ew32(CTRL, ctrl); | |
- e1e_flush(); | |
- udelay(10); | |
- ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; | |
- ew32(CTRL, ctrl); | |
+ for (retry_count = 0; retry_count < 2; retry_count++) { | |
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); | |
+ if (ret_val || (phy_reg == 0xFFFF)) | |
+ continue; | |
+ phy_id = (u32)(phy_reg << 16); | |
+ | |
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); | |
+ if (ret_val || (phy_reg == 0xFFFF)) { | |
+ phy_id = 0; | |
+ continue; | |
+ } | |
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); | |
+ break; | |
+ } | |
+ | |
+ if (hw->phy.id) { | |
+ if (hw->phy.id == phy_id) | |
+ goto out; | |
+ } else if (phy_id) { | |
+ hw->phy.id = phy_id; | |
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); | |
+ goto out; | |
+ } | |
+ | |
+ /* In case the PHY needs to be in mdio slow mode, | |
+ * set slow mode and try to get the PHY id again. | |
+ */ | |
+ hw->phy.ops.release(hw); | |
+ ret_val = e1000_set_mdio_slow_mode_hv(hw); | |
+ if (!ret_val) | |
+ ret_val = e1000e_get_phy_id(hw); | |
+ hw->phy.ops.acquire(hw); | |
+ | |
+ if (ret_val) | |
+ return false; | |
+out: | |
+ if (hw->mac.type == e1000_pch_lpt) { | |
+ /* Unforce SMBus mode in PHY */ | |
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); | |
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; | |
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); | |
+ | |
+ /* Unforce SMBus mode in MAC */ | |
+ mac_reg = er32(CTRL_EXT); | |
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; | |
+ ew32(CTRL_EXT, mac_reg); | |
+ } | |
+ | |
+ return true; | |
} | |
/** | |
- * e1000_init_phy_params_pchlan - Initialize PHY function pointers | |
+ * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds | |
* @hw: pointer to the HW structure | |
* | |
- * Initialize family-specific PHY parameters and function pointers. | |
+ * Workarounds/flow necessary for PHY initialization during driver load | |
+ * and resume paths. | |
**/ | |
-static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |
+static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) | |
{ | |
- struct e1000_phy_info *phy = &hw->phy; | |
- u32 fwsm; | |
- s32 ret_val = 0; | |
+ u32 mac_reg, fwsm = er32(FWSM); | |
+ s32 ret_val; | |
- phy->addr = 1; | |
- phy->reset_delay_us = 100; | |
+ /* Gate automatic PHY configuration by hardware on managed and | |
+ * non-managed 82579 and newer adapters. | |
+ */ | |
+ e1000_gate_hw_phy_config_ich8lan(hw, true); | |
- phy->ops.set_page = e1000_set_page_igp; | |
- phy->ops.read_reg = e1000_read_phy_reg_hv; | |
- phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; | |
- phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; | |
- phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; | |
- phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; | |
- phy->ops.write_reg = e1000_write_phy_reg_hv; | |
- phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; | |
- phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; | |
- phy->ops.power_up = e1000_power_up_phy_copper; | |
- phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | |
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | |
- | |
- /* | |
- * The MAC-PHY interconnect may still be in SMBus mode | |
- * after Sx->S0. If the manageability engine (ME) is | |
- * disabled, then toggle the LANPHYPC Value bit to force | |
- * the interconnect to PCIe mode. | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) { | |
+ e_dbg("Failed to initialize PHY flow\n"); | |
+ goto out; | |
+ } | |
+ | |
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is | |
+ * inaccessible and resetting the PHY is not blocked, toggle the | |
+ * LANPHYPC Value bit to force the interconnect to PCIe mode. | |
*/ | |
- fwsm = er32(FWSM); | |
- if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { | |
- e1000_toggle_lanphypc_value_ich8lan(hw); | |
- msleep(50); | |
+ switch (hw->mac.type) { | |
+ case e1000_pch_lpt: | |
+ if (e1000_phy_is_accessible_pchlan(hw)) | |
+ break; | |
- /* | |
- * Gate automatic PHY configuration by hardware on | |
- * non-managed 82579 | |
+ /* Before toggling LANPHYPC, see if PHY is accessible by | |
+ * forcing MAC to SMBus mode first. | |
*/ | |
- if (hw->mac.type == e1000_pch2lan) | |
- e1000_gate_hw_phy_config_ich8lan(hw, true); | |
+ mac_reg = er32(CTRL_EXT); | |
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; | |
+ ew32(CTRL_EXT, mac_reg); | |
+ | |
+ /* Wait 50 milliseconds for MAC to finish any retries | |
+ * that it might be trying to perform from previous | |
+ * attempts to acknowledge any phy read requests. | |
+ */ | |
+ msleep(50); | |
+ | |
+ /* fall-through */ | |
+ case e1000_pch2lan: | |
+ if (e1000_phy_is_accessible_pchlan(hw)) | |
+ break; | |
+ | |
+ /* fall-through */ | |
+ case e1000_pchlan: | |
+ if ((hw->mac.type == e1000_pchlan) && | |
+ (fwsm & E1000_ICH_FWSM_FW_VALID)) | |
+ break; | |
+ | |
+ if (hw->phy.ops.check_reset_block(hw)) { | |
+ e_dbg("Required LANPHYPC toggle blocked by ME\n"); | |
+ ret_val = -E1000_ERR_PHY; | |
+ break; | |
+ } | |
+ | |
+ e_dbg("Toggling LANPHYPC\n"); | |
+ | |
+ /* Set Phy Config Counter to 50msec */ | |
+ mac_reg = er32(FEXTNVM3); | |
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; | |
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; | |
+ ew32(FEXTNVM3, mac_reg); | |
+ | |
+ /* Toggle LANPHYPC Value bit */ | |
+ mac_reg = er32(CTRL); | |
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; | |
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; | |
+ ew32(CTRL, mac_reg); | |
+ e1e_flush(); | |
+ usleep_range(10, 20); | |
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; | |
+ ew32(CTRL, mac_reg); | |
+ e1e_flush(); | |
+ if (hw->mac.type < e1000_pch_lpt) { | |
+ msleep(50); | |
+ } else { | |
+ u16 count = 20; | |
+ do { | |
+ usleep_range(5000, 10000); | |
+ } while (!(er32(CTRL_EXT) & | |
+ E1000_CTRL_EXT_LPCD) && count--); | |
+ usleep_range(30000, 60000); | |
+ if (e1000_phy_is_accessible_pchlan(hw)) | |
+ break; | |
+ | |
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode | |
+ * so ensure that the MAC is also out of SMBus mode | |
+ */ | |
+ mac_reg = er32(CTRL_EXT); | |
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; | |
+ ew32(CTRL_EXT, mac_reg); | |
+ | |
+ if (e1000_phy_is_accessible_pchlan(hw)) | |
+ break; | |
+ | |
+ ret_val = -E1000_ERR_PHY; | |
+ } | |
+ break; | |
+ default: | |
+ break; | |
} | |
- /* | |
- * Reset the PHY before any access to it. Doing so, ensures that | |
- * the PHY is in a known good state before we read/write PHY registers. | |
- * The generic reset is sufficient here, because we haven't determined | |
- * the PHY type yet. | |
- */ | |
- ret_val = e1000e_phy_hw_reset_generic(hw); | |
- if (ret_val) | |
- goto out; | |
+ hw->phy.ops.release(hw); | |
+ if (!ret_val) { | |
+ /* Reset the PHY before any access to it. Doing so, ensures | |
+ * that the PHY is in a known good state before we read/write | |
+ * PHY registers. The generic reset is sufficient here, | |
+ * because we haven't determined the PHY type yet. | |
+ */ | |
+ ret_val = e1000e_phy_hw_reset_generic(hw); | |
+ } | |
+out: | |
/* Ungate automatic PHY configuration on non-managed 82579 */ | |
if ((hw->mac.type == e1000_pch2lan) && | |
!(fwsm & E1000_ICH_FWSM_FW_VALID)) { | |
@@ -359,33 +365,70 @@ | |
e1000_gate_hw_phy_config_ich8lan(hw, false); | |
} | |
+ return ret_val; | |
+} | |
+ | |
+/** | |
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers | |
+ * @hw: pointer to the HW structure | |
+ * | |
+ * Initialize family-specific PHY parameters and function pointers. | |
+ **/ | |
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |
+{ | |
+ struct e1000_phy_info *phy = &hw->phy; | |
+ s32 ret_val; | |
+ | |
+ phy->addr = 1; | |
+ phy->reset_delay_us = 100; | |
+ | |
+ phy->ops.set_page = e1000_set_page_igp; | |
+ phy->ops.read_reg = e1000_read_phy_reg_hv; | |
+ phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; | |
+ phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; | |
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; | |
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; | |
+ phy->ops.write_reg = e1000_write_phy_reg_hv; | |
+ phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; | |
+ phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; | |
+ phy->ops.power_up = e1000_power_up_phy_copper; | |
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | |
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | |
+ | |
phy->id = e1000_phy_unknown; | |
- switch (hw->mac.type) { | |
- default: | |
- ret_val = e1000e_get_phy_id(hw); | |
- if (ret_val) | |
- goto out; | |
- if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) | |
+ | |
+ ret_val = e1000_init_phy_workarounds_pchlan(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ if (phy->id == e1000_phy_unknown) | |
+ switch (hw->mac.type) { | |
+ default: | |
+ ret_val = e1000e_get_phy_id(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) | |
+ break; | |
+ /* fall-through */ | |
+ case e1000_pch2lan: | |
+ case e1000_pch_lpt: | |
+ /* In case the PHY needs to be in mdio slow mode, | |
+ * set slow mode and try to get the PHY id again. | |
+ */ | |
+ ret_val = e1000_set_mdio_slow_mode_hv(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ ret_val = e1000e_get_phy_id(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
break; | |
- /* fall-through */ | |
- case e1000_pch2lan: | |
- /* | |
- * In case the PHY needs to be in mdio slow mode, | |
- * set slow mode and try to get the PHY id again. | |
- */ | |
- ret_val = e1000_set_mdio_slow_mode_hv(hw); | |
- if (ret_val) | |
- goto out; | |
- ret_val = e1000e_get_phy_id(hw); | |
- if (ret_val) | |
- goto out; | |
- break; | |
- } | |
+ } | |
phy->type = e1000e_get_phy_type_from_id(phy->id); | |
switch (phy->type) { | |
case e1000_phy_82577: | |
case e1000_phy_82579: | |
+ case e1000_phy_i217: | |
phy->ops.check_polarity = e1000_check_polarity_82577; | |
phy->ops.force_speed_duplex = | |
e1000_phy_force_speed_duplex_82577; | |
@@ -404,7 +447,6 @@ | |
break; | |
} | |
-out: | |
return ret_val; | |
} | |
@@ -420,20 +462,19 @@ | |
s32 ret_val; | |
u16 i = 0; | |
- phy->addr = 1; | |
- phy->reset_delay_us = 100; | |
+ phy->addr = 1; | |
+ phy->reset_delay_us = 100; | |
- phy->ops.power_up = e1000_power_up_phy_copper; | |
- phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | |
+ phy->ops.power_up = e1000_power_up_phy_copper; | |
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | |
- /* | |
- * We may need to do this twice - once for IGP and if that fails, | |
+ /* We may need to do this twice - once for IGP and if that fails, | |
* we'll set BM func pointers and try again | |
*/ | |
ret_val = e1000e_determine_phy_address(hw); | |
if (ret_val) { | |
phy->ops.write_reg = e1000e_write_phy_reg_bm; | |
- phy->ops.read_reg = e1000e_read_phy_reg_bm; | |
+ phy->ops.read_reg = e1000e_read_phy_reg_bm; | |
ret_val = e1000e_determine_phy_address(hw); | |
if (ret_val) { | |
e_dbg("Cannot determine PHY addr. Erroring out\n"); | |
@@ -512,8 +553,7 @@ | |
gfpreg = er32flash(ICH_FLASH_GFPREG); | |
- /* | |
- * sector_X_addr is a "sector"-aligned address (4096 bytes) | |
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes) | |
* Add 1 to sector_end_addr since this sector is included in | |
* the overall size. | |
*/ | |
@@ -523,12 +563,11 @@ | |
/* flash_base_addr is byte-aligned */ | |
nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; | |
- /* | |
- * find total size of the NVM, then cut in half since the total | |
+ /* find total size of the NVM, then cut in half since the total | |
* size represents two separate NVM banks. | |
*/ | |
- nvm->flash_bank_size = (sector_end_addr - sector_base_addr) | |
- << FLASH_SECTOR_ADDR_SHIFT; | |
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) | |
+ << FLASH_SECTOR_ADDR_SHIFT); | |
nvm->flash_bank_size /= 2; | |
/* Adjust to word count */ | |
nvm->flash_bank_size /= sizeof(u16); | |
@@ -538,7 +577,7 @@ | |
/* Clear shadow ram */ | |
for (i = 0; i < nvm->word_size; i++) { | |
dev_spec->shadow_ram[i].modified = false; | |
- dev_spec->shadow_ram[i].value = 0xFFFF; | |
+ dev_spec->shadow_ram[i].value = 0xFFFF; | |
} | |
return 0; | |
@@ -551,9 +590,8 @@ | |
* Initialize family-specific MAC parameters and function | |
* pointers. | |
**/ | |
-static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |
+static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) | |
{ | |
- struct e1000_hw *hw = &adapter->hw; | |
struct e1000_mac_info *mac = &hw->mac; | |
/* Set media type function pointer */ | |
@@ -572,7 +610,7 @@ | |
/* Adaptive IFS supported */ | |
mac->adaptive_ifs = true; | |
- /* LED operations */ | |
+ /* LED and other operations */ | |
switch (mac->type) { | |
case e1000_ich8lan: | |
case e1000_ich9lan: | |
@@ -580,7 +618,7 @@ | |
/* check management mode */ | |
mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; | |
/* ID LED init */ | |
- mac->ops.id_led_init = e1000e_id_led_init; | |
+ mac->ops.id_led_init = e1000e_id_led_init_generic; | |
/* blink LED */ | |
mac->ops.blink_led = e1000e_blink_led_generic; | |
/* setup LED */ | |
@@ -591,8 +629,12 @@ | |
mac->ops.led_on = e1000_led_on_ich8lan; | |
mac->ops.led_off = e1000_led_off_ich8lan; | |
break; | |
- case e1000_pchlan: | |
case e1000_pch2lan: | |
+ mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; | |
+ mac->ops.rar_set = e1000_rar_set_pch2lan; | |
+ /* fall-through */ | |
+ case e1000_pch_lpt: | |
+ case e1000_pchlan: | |
/* check management mode */ | |
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; | |
/* ID LED init */ | |
@@ -609,48 +651,334 @@ | |
break; | |
} | |
+ if (mac->type == e1000_pch_lpt) { | |
+ mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; | |
+ mac->ops.rar_set = e1000_rar_set_pch_lpt; | |
+ mac->ops.setup_physical_interface = | |
+ e1000_setup_copper_link_pch_lpt; | |
+ } | |
+ | |
/* Enable PCS Lock-loss workaround for ICH8 */ | |
if (mac->type == e1000_ich8lan) | |
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); | |
- /* Gate automatic PHY configuration by hardware on managed 82579 */ | |
- if ((mac->type == e1000_pch2lan) && | |
- (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | |
- e1000_gate_hw_phy_config_ich8lan(hw, true); | |
- | |
return 0; | |
} | |
/** | |
+ * __e1000_access_emi_reg_locked - Read/write EMI register | |
+ * @hw: pointer to the HW structure | |
+ * @addr: EMI address to program | |
+ * @data: pointer to value to read/write from/to the EMI address | |
+ * @read: boolean flag to indicate read or write | |
+ * | |
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired. | |
+ **/ | |
+static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, | |
+ u16 *data, bool read) | |
+{ | |
+ s32 ret_val; | |
+ | |
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ if (read) | |
+ ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); | |
+ else | |
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); | |
+ | |
+ return ret_val; | |
+} | |
+ | |
+/** | |
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register | |
+ * @hw: pointer to the HW structure | |
+ * @addr: EMI address to program | |
+ * @data: value to be read from the EMI address | |
+ * | |
+ * Assumes the SW/FW/HW Semaphore is already acquired. | |
+ **/ | |
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) | |
+{ | |
+ return __e1000_access_emi_reg_locked(hw, addr, data, true); | |
+} | |
+ | |
+/** | |
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register | |
+ * @hw: pointer to the HW structure | |
+ * @addr: EMI address to program | |
+ * @data: value to be written to the EMI address | |
+ * | |
+ * Assumes the SW/FW/HW Semaphore is already acquired. | |
+ **/ | |
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) | |
+{ | |
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false); | |
+} | |
+ | |
+/** | |
* e1000_set_eee_pchlan - Enable/disable EEE support | |
* @hw: pointer to the HW structure | |
* | |
- * Enable/disable EEE based on setting in dev_spec structure. The bits in | |
- * the LPI Control register will remain set only if/when link is up. | |
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex of | |
+ * the link and the EEE capabilities of the link partner. The LPI Control | |
+ * register bits will remain set only if/when link is up. | |
**/ | |
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) | |
{ | |
- s32 ret_val = 0; | |
- u16 phy_reg; | |
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | |
+ s32 ret_val; | |
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; | |
- if (hw->phy.type != e1000_phy_82579) | |
- goto out; | |
+ switch (hw->phy.type) { | |
+ case e1000_phy_82579: | |
+ lpa = I82579_EEE_LP_ABILITY; | |
+ pcs_status = I82579_EEE_PCS_STATUS; | |
+ adv_addr = I82579_EEE_ADVERTISEMENT; | |
+ break; | |
+ case e1000_phy_i217: | |
+ lpa = I217_EEE_LP_ABILITY; | |
+ pcs_status = I217_EEE_PCS_STATUS; | |
+ adv_addr = I217_EEE_ADVERTISEMENT; | |
+ break; | |
+ default: | |
+ return 0; | |
+ } | |
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); | |
+ ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- if (hw->dev_spec.ich8lan.eee_disable) | |
- phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; | |
- else | |
- phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; | |
+ ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ /* Clear bits that enable EEE in various speeds */ | |
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; | |
+ | |
+ /* Enable EEE if not disabled by user */ | |
+ if (!dev_spec->eee_disable) { | |
+ /* Save off link partner's EEE ability */ | |
+ ret_val = e1000_read_emi_reg_locked(hw, lpa, | |
+ &dev_spec->eee_lp_ability); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ /* Read EEE advertisement */ | |
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ /* Enable EEE only for speeds in which the link partner is | |
+ * EEE capable and for which we advertise EEE. | |
+ */ | |
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) | |
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; | |
+ | |
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { | |
+ e1e_rphy_locked(hw, MII_LPA, &data); | |
+ if (data & LPA_100FULL) | |
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; | |
+ else | |
+ /* EEE is not supported in 100Half, so ignore | |
+ * partner's EEE in 100 ability if full-duplex | |
+ * is not advertised. | |
+ */ | |
+ dev_spec->eee_lp_ability &= | |
+ ~I82579_EEE_100_SUPPORTED; | |
+ } | |
+ } | |
+ | |
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ | |
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); | |
+release: | |
+ hw->phy.ops.release(hw); | |
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); | |
-out: | |
return ret_val; | |
} | |
/** | |
+ * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP | |
+ * @hw: pointer to the HW structure | |
+ * @link: link up bool flag | |
+ * | |
+ * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications | |
+ * preventing further DMA write requests. Workaround the issue by disabling | |
+ * the de-assertion of the clock request when in 1Gpbs mode. | |
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link | |
+ * speeds in order to avoid Tx hangs. | |
+ **/ | |
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) | |
+{ | |
+ u32 fextnvm6 = er32(FEXTNVM6); | |
+ u32 status = er32(STATUS); | |
+ s32 ret_val = 0; | |
+ u16 reg; | |
+ | |
+ if (link && (status & E1000_STATUS_SPEED_1000)) { | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ ret_val = | |
+ e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, | |
+ ®); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ ret_val = | |
+ e1000e_write_kmrn_reg_locked(hw, | |
+ E1000_KMRNCTRLSTA_K1_CONFIG, | |
+ reg & | |
+ ~E1000_KMRNCTRLSTA_K1_ENABLE); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ usleep_range(10, 20); | |
+ | |
+ ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); | |
+ | |
+ ret_val = | |
+ e1000e_write_kmrn_reg_locked(hw, | |
+ E1000_KMRNCTRLSTA_K1_CONFIG, | |
+ reg); | |
+release: | |
+ hw->phy.ops.release(hw); | |
+ } else { | |
+ /* clear FEXTNVM6 bit 8 on link down or 10/100 */ | |
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; | |
+ | |
+ if (!link || ((status & E1000_STATUS_SPEED_100) && | |
+ (status & E1000_STATUS_FD))) | |
+ goto update_fextnvm6; | |
+ | |
+ ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ /* Clear link status transmit timeout */ | |
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; | |
+ | |
+ if (status & E1000_STATUS_SPEED_100) { | |
+ /* Set inband Tx timeout to 5x10us for 100Half */ | |
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; | |
+ | |
+ /* Do not extend the K1 entry latency for 100Half */ | |
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; | |
+ } else { | |
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */ | |
+ reg |= 50 << | |
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; | |
+ | |
+ /* Extend the K1 entry latency for 10 Mbps */ | |
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; | |
+ } | |
+ | |
+ ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+update_fextnvm6: | |
+ ew32(FEXTNVM6, fextnvm6); | |
+ } | |
+ | |
+ return ret_val; | |
+} | |
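
The 10/100 branch above programs the I217 inband link-status Tx timeout in units of 10 us, i.e. 5 -> 50 us for 100Half and 50 -> 500 us for 10 Mb/s links. A small sketch of that encoding, with the mask and shift taken as parameters since their numeric values are not shown in this diff:

    #include <stdint.h>

    /* Clear the old timeout field and program a new one, expressed in 10 us units. */
    static uint16_t set_inband_tx_timeout(uint16_t reg, unsigned int units_of_10us,
                                          uint16_t timeout_mask,
                                          unsigned int timeout_shift)
    {
            reg &= (uint16_t)~timeout_mask;                    /* drop previous timeout */
            reg |= (uint16_t)(units_of_10us << timeout_shift); /* e.g. 5 or 50 */
            return reg;
    }
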
+ | |
+/** | |
+ * e1000_platform_pm_pch_lpt - Set platform power management values | |
+ * @hw: pointer to the HW structure | |
+ * @link: bool indicating link status | |
+ * | |
+ * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" | |
+ * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed | |
+ * when link is up (which must not exceed the maximum latency supported | |
+ * by the platform), otherwise specify there is no LTR requirement. | |
+ * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop | |
+ * latencies in the LTR Extended Capability Structure in the PCIe Extended | |
+ * Capability register set, on this device LTR is set by writing the | |
+ * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and | |
+ * setting the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) |
+ * message to the PMC. | |
+ **/ | |
+static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) | |
+{ | |
+ u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | | |
+ link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; | |
+ u16 lat_enc = 0; /* latency encoded */ | |
+ | |
+ if (link) { | |
+ u16 speed, duplex, scale = 0; | |
+ u16 max_snoop, max_nosnoop; | |
+ u16 max_ltr_enc; /* max LTR latency encoded */ | |
+ s64 lat_ns; /* latency (ns) */ | |
+ s64 value; | |
+ u32 rxa; | |
+ | |
+ if (!hw->adapter->max_frame_size) { | |
+ e_dbg("max_frame_size not set.\n"); | |
+ return -E1000_ERR_CONFIG; | |
+ } | |
+ | |
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex); | |
+ if (!speed) { | |
+ e_dbg("Speed not set.\n"); | |
+ return -E1000_ERR_CONFIG; | |
+ } | |
+ | |
+ /* Rx Packet Buffer Allocation size (KB) */ | |
+ rxa = er32(PBA) & E1000_PBA_RXA_MASK; | |
+ | |
+ /* Determine the maximum latency tolerated by the device. | |
+ * | |
+ * Per the PCIe spec, the tolerated latencies are encoded as | |
+ * a 3-bit encoded scale (only 0-5 are valid) multiplied by | |
+ * a 10-bit value (0-1023) to provide a range from 1 ns to | |
+ * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, | |
+ * 1=2^5ns, 2=2^10ns,...5=2^25ns. | |
+ */ | |
+ lat_ns = ((s64)rxa * 1024 - | |
+ (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; | |
+ if (lat_ns < 0) | |
+ lat_ns = 0; | |
+ else | |
+ do_div(lat_ns, speed); | |
+ | |
+ value = lat_ns; | |
+ while (value > PCI_LTR_VALUE_MASK) { | |
+ scale++; | |
+ value = DIV_ROUND_UP(value, (1 << 5)); | |
+ } | |
+ if (scale > E1000_LTRV_SCALE_MAX) { | |
+ e_dbg("Invalid LTR latency scale %d\n", scale); | |
+ return -E1000_ERR_CONFIG; | |
+ } | |
+ lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); | |
+ | |
+ /* Determine the maximum latency tolerated by the platform */ | |
+ pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, | |
+ &max_snoop); | |
+ pci_read_config_word(hw->adapter->pdev, | |
+ E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); | |
+ max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); | |
+ | |
+ if (lat_enc > max_ltr_enc) | |
+ lat_enc = max_ltr_enc; | |
+ } | |
+ | |
+ /* Set Snoop and No-Snoop latencies the same */ | |
+ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); | |
+ ew32(LTRV, reg); | |
+ | |
+ return 0; | |
+} | |
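
The loop above implements the PCIe LTR encoding: a 10-bit value multiplied by 2^(5*scale) ns, with scale 0..5. A standalone sketch with one worked example; PCI_LTR_VALUE_MASK (0x3ff) and PCI_LTR_SCALE_SHIFT (10) are assumed to match the kernel's definitions, and the sample buffer/frame/speed numbers are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t ltr_encode(uint64_t lat_ns)
    {
            uint64_t value = lat_ns;
            unsigned int scale = 0;

            while (value > 0x3ff) {             /* PCI_LTR_VALUE_MASK */
                    scale++;
                    value = (value + 31) / 32;  /* DIV_ROUND_UP(value, 1 << 5) */
            }
            /* the driver additionally rejects scale > 5 (E1000_LTRV_SCALE_MAX) */
            return (uint16_t)((scale << 10) | value);  /* PCI_LTR_SCALE_SHIFT == 10 */
    }

    int main(void)
    {
            /* e.g. 24 KB Rx buffer, 1522-byte max frame, 1000 Mb/s link:
             * lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000 = 172256
             * -> value 169, scale 2 (units of 2^10 ns), encoding 0x8a9
             */
            printf("0x%x\n", ltr_encode(172256));
            return 0;
    }
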
+ | |
+/** | |
* e1000_check_for_copper_link_ich8lan - Check for link (Copper) | |
* @hw: pointer to the HW structure | |
* | |
@@ -665,34 +993,80 @@ | |
bool link; | |
u16 phy_reg; | |
- /* | |
- * We only want to go out to the PHY registers to see if Auto-Neg | |
+ /* We only want to go out to the PHY registers to see if Auto-Neg | |
* has completed and/or if our link status has changed. The | |
* get_link_status flag is set upon receiving a Link Status | |
* Change or Rx Sequence Error interrupt. | |
*/ | |
- if (!mac->get_link_status) { | |
- ret_val = 0; | |
- goto out; | |
- } | |
+ if (!mac->get_link_status) | |
+ return 0; | |
- /* | |
- * First we want to see if the MII Status Register reports | |
+ /* First we want to see if the MII Status Register reports | |
* link. If so, then we want to get the current speed/duplex | |
* of the PHY. | |
*/ | |
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (hw->mac.type == e1000_pchlan) { | |
ret_val = e1000_k1_gig_workaround_hv(hw, link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
+ } | |
+ | |
+ /* When connected at 10Mbps half-duplex, 82579 parts are excessively | |
+ * aggressive resulting in many collisions. To avoid this, increase | |
+ * the IPG and reduce Rx latency in the PHY. | |
+ */ | |
+ if ((hw->mac.type == e1000_pch2lan) && link) { | |
+ u32 reg; | |
+ reg = er32(STATUS); | |
+ if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { | |
+ reg = er32(TIPG); | |
+ reg &= ~E1000_TIPG_IPGT_MASK; | |
+ reg |= 0xFF; | |
+ ew32(TIPG, reg); | |
+ | |
+ /* Reduce Rx latency in analog PHY */ | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ ret_val = | |
+ e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); | |
+ | |
+ hw->phy.ops.release(hw); | |
+ | |
+ if (ret_val) | |
+ return ret_val; | |
+ } | |
+ } | |
+ | |
+ /* Work-around I218 hang issue */ | |
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || | |
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || | |
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || | |
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { | |
+ ret_val = e1000_k1_workaround_lpt_lp(hw, link); | |
+ if (ret_val) | |
+ return ret_val; | |
} | |
+ if (hw->mac.type == e1000_pch_lpt) { | |
+ /* Set platform power management values for | |
+ * Latency Tolerance Reporting (LTR) | |
+ */ | |
+ ret_val = e1000_platform_pm_pch_lpt(hw, link); | |
+ if (ret_val) | |
+ return ret_val; | |
+ } | |
+ | |
+ /* Clear link partner's EEE ability */ | |
+ hw->dev_spec.ich8lan.eee_lp_ability = 0; | |
+ | |
if (!link) | |
- goto out; /* No link detected */ | |
+ return 0; /* No link detected */ | |
mac->get_link_status = false; | |
@@ -700,17 +1074,16 @@ | |
case e1000_pch2lan: | |
ret_val = e1000_k1_workaround_lv(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* fall-thru */ | |
case e1000_pchlan: | |
if (hw->phy.type == e1000_phy_82578) { | |
ret_val = e1000_link_stall_workaround_hv(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
- /* | |
- * Workaround for PCHx parts in half-duplex: | |
+ /* Workaround for PCHx parts in half-duplex: | |
* Set the number of preambles removed from the packet | |
* when it is passed from the PHY to the MAC to prevent | |
* the MAC from misinterpreting the packet type. | |
@@ -727,8 +1100,7 @@ | |
break; | |
} | |
- /* | |
- * Check if there was DownShift, must be checked | |
+ /* Check if there was DownShift, must be checked | |
* immediately after link-up | |
*/ | |
e1000e_check_downshift(hw); | |
@@ -736,26 +1108,21 @@ | |
/* Enable/Disable EEE after link up */ | |
ret_val = e1000_set_eee_pchlan(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- /* | |
- * If we are forcing speed/duplex, then we simply return since | |
+ /* If we are forcing speed/duplex, then we simply return since | |
* we have already determined whether we have link or not. | |
*/ | |
- if (!mac->autoneg) { | |
- ret_val = -E1000_ERR_CONFIG; | |
- goto out; | |
- } | |
+ if (!mac->autoneg) | |
+ return -E1000_ERR_CONFIG; | |
- /* | |
- * Auto-Neg is enabled. Auto Speed Detection takes care | |
+ /* Auto-Neg is enabled. Auto Speed Detection takes care | |
* of MAC speed/duplex configuration. So we only need to | |
* configure Collision Distance in the MAC. | |
*/ | |
- e1000e_config_collision_dist(hw); | |
+ mac->ops.config_collision_dist(hw); | |
- /* | |
- * Configure Flow Control now that Auto-Neg has completed. | |
+ /* Configure Flow Control now that Auto-Neg has completed. | |
* First, we need to restore the desired flow control | |
* settings because we may have had to re-autoneg with a | |
* different link partner. | |
@@ -764,7 +1131,6 @@ | |
if (ret_val) | |
e_dbg("Error configuring flow control\n"); | |
-out: | |
return ret_val; | |
} | |
@@ -773,7 +1139,7 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
s32 rc; | |
- rc = e1000_init_mac_params_ich8lan(adapter); | |
+ rc = e1000_init_mac_params_ich8lan(hw); | |
if (rc) | |
return rc; | |
@@ -789,6 +1155,7 @@ | |
break; | |
case e1000_pchlan: | |
case e1000_pch2lan: | |
+ case e1000_pch_lpt: | |
rc = e1000_init_phy_params_pchlan(hw); | |
break; | |
default: | |
@@ -797,8 +1164,7 @@ | |
if (rc) | |
return rc; | |
- /* | |
- * Disable Jumbo Frame support on parts with Intel 10/100 PHY or | |
+ /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or | |
* on parts with MACsec enabled in NVM (reflected in CTRL_EXT). | |
*/ | |
if ((adapter->hw.phy.type == e1000_phy_ife) || | |
@@ -819,10 +1185,6 @@ | |
(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | |
adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; | |
- /* Disable EEE by default until IEEE802.3az spec is finalized */ | |
- if (adapter->flags2 & FLAG2_HAS_EEE) | |
- adapter->hw.dev_spec.ich8lan.eee_disable = true; | |
- | |
return 0; | |
} | |
@@ -834,7 +1196,7 @@ | |
* | |
* Acquires the mutex for performing NVM operations. | |
**/ | |
-static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) | |
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) | |
{ | |
mutex_lock(&nvm_mutex); | |
@@ -847,7 +1209,7 @@ | |
* | |
* Releases the mutex used while performing NVM operations. | |
**/ | |
-static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) | |
+static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) | |
{ | |
mutex_unlock(&nvm_mutex); | |
} | |
@@ -900,8 +1262,7 @@ | |
} | |
if (!timeout) { | |
- e_dbg("Failed to acquire the semaphore, FW or HW has it: " | |
- "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", | |
+ e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", | |
er32(FWSM), extcnf_ctrl); | |
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | |
ew32(EXTCNF_CTRL, extcnf_ctrl); | |
@@ -952,9 +1313,9 @@ | |
u32 fwsm; | |
fwsm = er32(FWSM); | |
- return (fwsm & E1000_ICH_FWSM_FW_VALID) && | |
- ((fwsm & E1000_FWSM_MODE_MASK) == | |
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); | |
+ return ((fwsm & E1000_ICH_FWSM_FW_VALID) && | |
+ ((fwsm & E1000_FWSM_MODE_MASK) == | |
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))); | |
} | |
/** | |
@@ -971,7 +1332,146 @@ | |
fwsm = er32(FWSM); | |
return (fwsm & E1000_ICH_FWSM_FW_VALID) && | |
- (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); | |
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); | |
+} | |
+ | |
+/** | |
+ * e1000_rar_set_pch2lan - Set receive address register | |
+ * @hw: pointer to the HW structure | |
+ * @addr: pointer to the receive address | |
+ * @index: receive address array register | |
+ * | |
+ * Sets the receive address array register at index to the address passed | |
+ * in by addr. For 82579, RAR[0] is the base address register that is to | |
+ * contain the MAC address but RAR[1-6] are reserved for manageability (ME). | |
+ * Use SHRA[0-3] in place of those reserved for ME. | |
+ **/ | |
+static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) | |
+{ | |
+ u32 rar_low, rar_high; | |
+ | |
+ /* HW expects these in little endian so we reverse the byte order | |
+ * from network order (big endian) to little endian | |
+ */ | |
+ rar_low = ((u32)addr[0] | | |
+ ((u32)addr[1] << 8) | | |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); | |
+ | |
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); | |
+ | |
+ /* If MAC address zero, no need to set the AV bit */ | |
+ if (rar_low || rar_high) | |
+ rar_high |= E1000_RAH_AV; | |
+ | |
+ if (index == 0) { | |
+ ew32(RAL(index), rar_low); | |
+ e1e_flush(); | |
+ ew32(RAH(index), rar_high); | |
+ e1e_flush(); | |
+ return; | |
+ } | |
+ | |
+ /* RAR[1-6] are owned by manageability. Skip those and program the | |
+ * next address into the SHRA register array. | |
+ */ | |
+ if (index < (u32)(hw->mac.rar_entry_count - 6)) { | |
+ s32 ret_val; | |
+ | |
+ ret_val = e1000_acquire_swflag_ich8lan(hw); | |
+ if (ret_val) | |
+ goto out; | |
+ | |
+ ew32(SHRAL(index - 1), rar_low); | |
+ e1e_flush(); | |
+ ew32(SHRAH(index - 1), rar_high); | |
+ e1e_flush(); | |
+ | |
+ e1000_release_swflag_ich8lan(hw); | |
+ | |
+ /* verify the register updates */ | |
+ if ((er32(SHRAL(index - 1)) == rar_low) && | |
+ (er32(SHRAH(index - 1)) == rar_high)) | |
+ return; | |
+ | |
+ e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", | |
+ (index - 1), er32(FWSM)); | |
+ } | |
+ | |
+out: | |
+ e_dbg("Failed to write receive address at index %d\n", index); | |
+} | |
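
The comment and bounds check above imply a fixed mapping from the host's receive-address index to the 82579 registers: index 0 is RAL/RAH(0), RAR[1-6] belong to the ME firmware, and further host indices land in SHRAL/SHRAH(index - 1). A rough sketch of that mapping (no hardware access, illustrative only):

    #include <stdio.h>

    static void describe_rar_pch2lan(unsigned int index, unsigned int rar_entry_count)
    {
            if (index == 0)
                    printf("index 0 -> RAL/RAH(0)\n");
            else if (index < rar_entry_count - 6)   /* 6 RARs reserved for ME */
                    printf("index %u -> SHRAL/SHRAH(%u)\n", index, index - 1);
            else
                    printf("index %u -> not writable by host\n", index);
    }
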
+ | |
+/** | |
+ * e1000_rar_set_pch_lpt - Set receive address registers | |
+ * @hw: pointer to the HW structure | |
+ * @addr: pointer to the receive address | |
+ * @index: receive address array register | |
+ * | |
+ * Sets the receive address register array at index to the address passed | |
+ * in by addr. For LPT, RAR[0] is the base address register that is to | |
+ * contain the MAC address. SHRA[0-10] are the shared receive address | |
+ * registers that are shared between the Host and manageability engine (ME). | |
+ **/ | |
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) | |
+{ | |
+ u32 rar_low, rar_high; | |
+ u32 wlock_mac; | |
+ | |
+ /* HW expects these in little endian so we reverse the byte order | |
+ * from network order (big endian) to little endian | |
+ */ | |
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | | |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); | |
+ | |
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); | |
+ | |
+ /* If MAC address zero, no need to set the AV bit */ | |
+ if (rar_low || rar_high) | |
+ rar_high |= E1000_RAH_AV; | |
+ | |
+ if (index == 0) { | |
+ ew32(RAL(index), rar_low); | |
+ e1e_flush(); | |
+ ew32(RAH(index), rar_high); | |
+ e1e_flush(); | |
+ return; | |
+ } | |
+ | |
+ /* The manageability engine (ME) can lock certain SHRAR registers that | |
+ * it is using - those registers are unavailable for use. | |
+ */ | |
+ if (index < hw->mac.rar_entry_count) { | |
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; | |
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; | |
+ | |
+ /* Check if all SHRAR registers are locked */ | |
+ if (wlock_mac == 1) | |
+ goto out; | |
+ | |
+ if ((wlock_mac == 0) || (index <= wlock_mac)) { | |
+ s32 ret_val; | |
+ | |
+ ret_val = e1000_acquire_swflag_ich8lan(hw); | |
+ | |
+ if (ret_val) | |
+ goto out; | |
+ | |
+ ew32(SHRAL_PCH_LPT(index - 1), rar_low); | |
+ e1e_flush(); | |
+ ew32(SHRAH_PCH_LPT(index - 1), rar_high); | |
+ e1e_flush(); | |
+ | |
+ e1000_release_swflag_ich8lan(hw); | |
+ | |
+ /* verify the register updates */ | |
+ if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && | |
+ (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) | |
+ return; | |
+ } | |
+ } | |
+ | |
+out: | |
+ e_dbg("Failed to write receive address at index %d\n", index); | |
} | |
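
The FWSM.WLOCK_MAC handling above reduces to a small predicate: 0 means no SHRA registers are locked by the ME, 1 means all of them are, and any other value N allows host indices 1..N. A sketch of that check (illustrative; it omits the rar_entry_count bound already enforced in the function):

    #include <stdbool.h>

    static bool shra_writable_pch_lpt(unsigned int index, unsigned int wlock_mac)
    {
            if (index == 0)
                    return true;              /* RAR[0] is always host-owned */
            if (wlock_mac == 1)
                    return false;             /* all SHRA registers locked by ME */
            return wlock_mac == 0 || index <= wlock_mac;
    }
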
/** | |
@@ -1002,21 +1502,34 @@ | |
{ | |
u16 phy_data; | |
u32 strap = er32(STRAP); | |
- s32 ret_val = 0; | |
+ u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> | |
+ E1000_STRAP_SMT_FREQ_SHIFT; | |
+ s32 ret_val; | |
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; | |
ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
phy_data &= ~HV_SMB_ADDR_MASK; | |
phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); | |
phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; | |
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); | |
-out: | |
- return ret_val; | |
+ if (hw->phy.type == e1000_phy_i217) { | |
+ /* Restore SMBus frequency */ | |
+ if (freq--) { | |
+ phy_data &= ~HV_SMB_ADDR_FREQ_MASK; | |
+ phy_data |= (freq & (1 << 0)) << | |
+ HV_SMB_ADDR_FREQ_LOW_SHIFT; | |
+ phy_data |= (freq & (1 << 1)) << | |
+ (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); | |
+ } else { | |
+ e_dbg("Unsupported SMB frequency in PHY\n"); | |
+ } | |
+ } | |
+ | |
+ return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); | |
} | |
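
The new i217 branch above splits a 2-bit SMBus frequency, read from the STRAP register and decremented by one, across two separate bit positions of HV_SMB_ADDR. An equivalent standalone sketch, with the shifts and mask passed in since their values are not part of this diff:

    #include <stdint.h>

    static uint16_t pack_smb_freq(uint16_t phy_data, unsigned int strap_freq,
                                  unsigned int low_shift, unsigned int high_shift,
                                  uint16_t freq_mask)
    {
            if (strap_freq == 0)
                    return phy_data;        /* frequency not supported; leave as-is */

            strap_freq--;                   /* mirrors the driver's "if (freq--)" */
            phy_data &= (uint16_t)~freq_mask;
            phy_data |= (uint16_t)((strap_freq & 1u) << low_shift);
            phy_data |= (uint16_t)(((strap_freq >> 1) & 1u) << high_shift);
            return phy_data;
    }
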
/** | |
@@ -1033,8 +1546,7 @@ | |
s32 ret_val = 0; | |
u16 word_addr, reg_data, reg_addr, phy_page = 0; | |
- /* | |
- * Initialize the PHY from the NVM on ICH platforms. This | |
+ /* Initialize the PHY from the NVM on ICH platforms. This | |
* is needed due to an issue where the NVM configuration is | |
* not properly autoloaded after power transitions. | |
* Therefore, after each PHY reset, we will load the | |
@@ -1053,6 +1565,7 @@ | |
/* Fall-thru */ | |
case e1000_pchlan: | |
case e1000_pch2lan: | |
+ case e1000_pch_lpt: | |
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; | |
break; | |
default: | |
@@ -1065,45 +1578,42 @@ | |
data = er32(FEXTNVM); | |
if (!(data & sw_cfg_mask)) | |
- goto out; | |
+ goto release; | |
- /* | |
- * Make sure HW does not configure LCD from PHY | |
+ /* Make sure HW does not configure LCD from PHY | |
* extended configuration before SW configuration | |
*/ | |
data = er32(EXTCNF_CTRL); | |
- if (!(hw->mac.type == e1000_pch2lan)) { | |
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) | |
- goto out; | |
- } | |
+ if ((hw->mac.type < e1000_pch2lan) && | |
+ (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) | |
+ goto release; | |
cnf_size = er32(EXTCNF_SIZE); | |
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; | |
cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; | |
if (!cnf_size) | |
- goto out; | |
+ goto release; | |
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; | |
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; | |
- if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && | |
- (hw->mac.type == e1000_pchlan)) || | |
- (hw->mac.type == e1000_pch2lan)) { | |
- /* | |
- * HW configures the SMBus address and LEDs when the | |
+ if (((hw->mac.type == e1000_pchlan) && | |
+ !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || | |
+ (hw->mac.type > e1000_pchlan)) { | |
+ /* HW configures the SMBus address and LEDs when the | |
* OEM and LCD Write Enable bits are set in the NVM. | |
* When both NVM bits are cleared, SW will configure | |
* them instead. | |
*/ | |
ret_val = e1000_write_smbus_addr(hw); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
data = er32(LEDCTL); | |
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, | |
(u16)data); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
} | |
/* Configure LCD from extended configuration region. */ | |
@@ -1112,15 +1622,14 @@ | |
word_addr = (u16)(cnf_base_addr << 1); | |
for (i = 0; i < cnf_size; i++) { | |
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, | |
- ®_data); | |
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, ®_data); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), | |
1, ®_addr); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
/* Save off the PHY page for future writes. */ | |
if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { | |
@@ -1131,13 +1640,12 @@ | |
reg_addr &= PHY_REG_MASK; | |
reg_addr |= phy_page; | |
- ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, | |
- reg_data); | |
+ ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
} | |
-out: | |
+release: | |
hw->phy.ops.release(hw); | |
return ret_val; | |
} | |
@@ -1159,57 +1667,54 @@ | |
bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; | |
if (hw->mac.type != e1000_pchlan) | |
- goto out; | |
+ return 0; | |
/* Wrap the whole flow with the sw flag */ | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ | |
if (link) { | |
if (hw->phy.type == e1000_phy_82578) { | |
- ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, | |
- &status_reg); | |
+ ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, | |
+ &status_reg); | |
if (ret_val) | |
goto release; | |
- status_reg &= BM_CS_STATUS_LINK_UP | | |
- BM_CS_STATUS_RESOLVED | | |
- BM_CS_STATUS_SPEED_MASK; | |
+ status_reg &= (BM_CS_STATUS_LINK_UP | | |
+ BM_CS_STATUS_RESOLVED | | |
+ BM_CS_STATUS_SPEED_MASK); | |
if (status_reg == (BM_CS_STATUS_LINK_UP | | |
- BM_CS_STATUS_RESOLVED | | |
- BM_CS_STATUS_SPEED_1000)) | |
+ BM_CS_STATUS_RESOLVED | | |
+ BM_CS_STATUS_SPEED_1000)) | |
k1_enable = false; | |
} | |
if (hw->phy.type == e1000_phy_82577) { | |
- ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, | |
- &status_reg); | |
+ ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); | |
if (ret_val) | |
goto release; | |
- status_reg &= HV_M_STATUS_LINK_UP | | |
- HV_M_STATUS_AUTONEG_COMPLETE | | |
- HV_M_STATUS_SPEED_MASK; | |
+ status_reg &= (HV_M_STATUS_LINK_UP | | |
+ HV_M_STATUS_AUTONEG_COMPLETE | | |
+ HV_M_STATUS_SPEED_MASK); | |
if (status_reg == (HV_M_STATUS_LINK_UP | | |
- HV_M_STATUS_AUTONEG_COMPLETE | | |
- HV_M_STATUS_SPEED_1000)) | |
+ HV_M_STATUS_AUTONEG_COMPLETE | | |
+ HV_M_STATUS_SPEED_1000)) | |
k1_enable = false; | |
} | |
/* Link stall fix for link up */ | |
- ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), | |
- 0x0100); | |
+ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); | |
if (ret_val) | |
goto release; | |
} else { | |
/* Link stall fix for link down */ | |
- ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), | |
- 0x4100); | |
+ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); | |
if (ret_val) | |
goto release; | |
} | |
@@ -1218,7 +1723,7 @@ | |
release: | |
hw->phy.ops.release(hw); | |
-out: | |
+ | |
return ret_val; | |
} | |
@@ -1234,30 +1739,28 @@ | |
**/ | |
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) | |
{ | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
u32 ctrl_reg = 0; | |
u32 ctrl_ext = 0; | |
u32 reg = 0; | |
u16 kmrn_reg = 0; | |
- ret_val = e1000e_read_kmrn_reg_locked(hw, | |
- E1000_KMRNCTRLSTA_K1_CONFIG, | |
- &kmrn_reg); | |
+ ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, | |
+ &kmrn_reg); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (k1_enable) | |
kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; | |
else | |
kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; | |
- ret_val = e1000e_write_kmrn_reg_locked(hw, | |
- E1000_KMRNCTRLSTA_K1_CONFIG, | |
- kmrn_reg); | |
+ ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, | |
+ kmrn_reg); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- udelay(20); | |
+ usleep_range(20, 40); | |
ctrl_ext = er32(CTRL_EXT); | |
ctrl_reg = er32(CTRL); | |
@@ -1267,14 +1770,13 @@ | |
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); | |
e1e_flush(); | |
- udelay(20); | |
+ usleep_range(20, 40); | |
ew32(CTRL, ctrl_reg); | |
ew32(CTRL_EXT, ctrl_ext); | |
e1e_flush(); | |
- udelay(20); | |
+ usleep_range(20, 40); | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -1292,28 +1794,28 @@ | |
u32 mac_reg; | |
u16 oem_reg; | |
- if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) | |
+ if (hw->mac.type < e1000_pchlan) | |
return ret_val; | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
return ret_val; | |
- if (!(hw->mac.type == e1000_pch2lan)) { | |
+ if (hw->mac.type == e1000_pchlan) { | |
mac_reg = er32(EXTCNF_CTRL); | |
if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) | |
- goto out; | |
+ goto release; | |
} | |
mac_reg = er32(FEXTNVM); | |
if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) | |
- goto out; | |
+ goto release; | |
mac_reg = er32(PHY_CTRL); | |
- ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); | |
+ ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); | |
@@ -1323,10 +1825,6 @@ | |
if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) | |
oem_reg |= HV_OEM_BITS_LPLU; | |
- | |
- /* Set Restart auto-neg to activate the bits */ | |
- if (!e1000_check_reset_block(hw)) | |
- oem_reg |= HV_OEM_BITS_RESTART_AN; | |
} else { | |
if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | | |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) | |
@@ -1337,15 +1835,19 @@ | |
oem_reg |= HV_OEM_BITS_LPLU; | |
} | |
- ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); | |
+ /* Set Restart auto-neg to activate the bits */ | |
+ if ((d0_state || (hw->mac.type != e1000_pchlan)) && | |
+ !hw->phy.ops.check_reset_block(hw)) | |
+ oem_reg |= HV_OEM_BITS_RESTART_AN; | |
-out: | |
+ ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); | |
+ | |
+release: | |
hw->phy.ops.release(hw); | |
return ret_val; | |
} | |
- | |
/** | |
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode | |
* @hw: pointer to the HW structure | |
@@ -1376,13 +1878,13 @@ | |
u16 phy_data; | |
if (hw->mac.type != e1000_pchlan) | |
- return ret_val; | |
+ return 0; | |
/* Set MDIO slow mode before any other MDIO access */ | |
if (hw->phy.type == e1000_phy_82577) { | |
ret_val = e1000_set_mdio_slow_mode_hv(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
if (((hw->phy.type == e1000_phy_82577) && | |
@@ -1400,13 +1902,12 @@ | |
} | |
if (hw->phy.type == e1000_phy_82578) { | |
- /* | |
- * Return registers to default by doing a soft reset then | |
+ /* Return registers to default by doing a soft reset then | |
* writing 0x3140 to the control register. | |
*/ | |
if (hw->phy.revision < 2) { | |
e1000e_phy_sw_reset(hw); | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); | |
} | |
} | |
@@ -1419,28 +1920,31 @@ | |
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); | |
hw->phy.ops.release(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- /* | |
- * Configure the K1 Si workaround during phy reset assuming there is | |
+ /* Configure the K1 Si workaround during phy reset assuming there is | |
* link so that it disables K1 if link is in 1Gbps. | |
*/ | |
ret_val = e1000_k1_gig_workaround_hv(hw, true); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* Workaround for link disconnects on a busy hub in half duplex */ | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
- ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); | |
+ return ret_val; | |
+ ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); | |
+ if (ret_val) | |
+ goto release; | |
+ ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); | |
if (ret_val) | |
goto release; | |
- ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, | |
- phy_data & 0x00FF); | |
+ | |
+ /* set MSE higher to enable link to stay up when noise is high */ | |
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); | |
release: | |
hw->phy.ops.release(hw); | |
-out: | |
+ | |
return ret_val; | |
} | |
@@ -1461,8 +1965,8 @@ | |
if (ret_val) | |
goto release; | |
- /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ | |
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { | |
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ | |
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) { | |
mac_reg = er32(RAL(i)); | |
hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), | |
(u16)(mac_reg & 0xFFFF)); | |
@@ -1496,22 +2000,21 @@ | |
u32 mac_reg; | |
u16 i; | |
- if (hw->mac.type != e1000_pch2lan) | |
- goto out; | |
+ if (hw->mac.type < e1000_pch2lan) | |
+ return 0; | |
/* disable Rx path while enabling/disabling workaround */ | |
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); | |
ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (enable) { | |
- /* | |
- * Write Rx addresses (rar_entry_count for RAL/H, +4 for | |
+ /* Write Rx addresses (rar_entry_count for RAL/H, and | |
* SHRAL/H) and initial CRC values to the MAC | |
*/ | |
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { | |
- u8 mac_addr[ETH_ALEN] = {0}; | |
+ for (i = 0; i < hw->mac.rar_entry_count; i++) { | |
+ u8 mac_addr[ETH_ALEN] = { 0 }; | |
u32 addr_high, addr_low; | |
addr_high = er32(RAH(i)); | |
@@ -1542,27 +2045,27 @@ | |
ew32(RCTL, mac_reg); | |
ret_val = e1000e_read_kmrn_reg(hw, | |
- E1000_KMRNCTRLSTA_CTRL_OFFSET, | |
- &data); | |
+ E1000_KMRNCTRLSTA_CTRL_OFFSET, | |
+ &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1000e_write_kmrn_reg(hw, | |
E1000_KMRNCTRLSTA_CTRL_OFFSET, | |
data | (1 << 0)); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1000e_read_kmrn_reg(hw, | |
- E1000_KMRNCTRLSTA_HD_CTRL, | |
- &data); | |
+ E1000_KMRNCTRLSTA_HD_CTRL, | |
+ &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
data &= ~(0xF << 8); | |
data |= (0xB << 8); | |
ret_val = e1000e_write_kmrn_reg(hw, | |
E1000_KMRNCTRLSTA_HD_CTRL, | |
data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* Enable jumbo frame workaround in the PHY */ | |
e1e_rphy(hw, PHY_REG(769, 23), &data); | |
@@ -1570,25 +2073,25 @@ | |
data |= (0x37 << 5); | |
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1e_rphy(hw, PHY_REG(769, 16), &data); | |
data &= ~(1 << 13); | |
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1e_rphy(hw, PHY_REG(776, 20), &data); | |
data &= ~(0x3FF << 2); | |
data |= (0x1A << 2); | |
ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1e_rphy(hw, HV_PM_CTRL, &data); | |
ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} else { | |
/* Write MAC register values back to h/w defaults */ | |
mac_reg = er32(FFLT_DBG); | |
@@ -1600,59 +2103,56 @@ | |
ew32(RCTL, mac_reg); | |
ret_val = e1000e_read_kmrn_reg(hw, | |
- E1000_KMRNCTRLSTA_CTRL_OFFSET, | |
- &data); | |
+ E1000_KMRNCTRLSTA_CTRL_OFFSET, | |
+ &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1000e_write_kmrn_reg(hw, | |
E1000_KMRNCTRLSTA_CTRL_OFFSET, | |
data & ~(1 << 0)); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1000e_read_kmrn_reg(hw, | |
- E1000_KMRNCTRLSTA_HD_CTRL, | |
- &data); | |
+ E1000_KMRNCTRLSTA_HD_CTRL, | |
+ &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
data &= ~(0xF << 8); | |
data |= (0xB << 8); | |
ret_val = e1000e_write_kmrn_reg(hw, | |
E1000_KMRNCTRLSTA_HD_CTRL, | |
data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* Write PHY register values back to h/w defaults */ | |
e1e_rphy(hw, PHY_REG(769, 23), &data); | |
data &= ~(0x7F << 5); | |
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1e_rphy(hw, PHY_REG(769, 16), &data); | |
data |= (1 << 13); | |
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1e_rphy(hw, PHY_REG(776, 20), &data); | |
data &= ~(0x3FF << 2); | |
data |= (0x8 << 2); | |
ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1e_rphy(hw, HV_PM_CTRL, &data); | |
ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
/* re-enable Rx path after enabling/disabling workaround */ | |
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); | |
- | |
-out: | |
- return ret_val; | |
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); | |
} | |
/** | |
@@ -1664,12 +2164,25 @@ | |
s32 ret_val = 0; | |
if (hw->mac.type != e1000_pch2lan) | |
- goto out; | |
+ return 0; | |
/* Set MDIO slow mode before any other MDIO access */ | |
ret_val = e1000_set_mdio_slow_mode_hv(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ /* set MSE higher to enable link to stay up when noise is high */ | |
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); | |
+ if (ret_val) | |
+ goto release; | |
+ /* drop link after 5 times MSE threshold was reached */ | |
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); | |
+release: | |
+ hw->phy.ops.release(hw); | |
-out: | |
return ret_val; | |
} | |
@@ -1687,12 +2200,12 @@ | |
u16 phy_reg; | |
if (hw->mac.type != e1000_pch2lan) | |
- goto out; | |
+ return 0; | |
/* Set K1 beacon duration based on 1Gbps speed or otherwise */ | |
ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) | |
== (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { | |
@@ -1701,11 +2214,21 @@ | |
ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (status_reg & HV_M_STATUS_SPEED_1000) { | |
+ u16 pm_phy_reg; | |
+ | |
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; | |
phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; | |
+ /* LV 1G Packet drop issue workaround */ |
+ ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); | |
+ if (ret_val) | |
+ return ret_val; | |
+ pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; | |
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); | |
+ if (ret_val) | |
+ return ret_val; | |
} else { | |
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; | |
phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; | |
@@ -1714,7 +2237,6 @@ | |
ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); | |
} | |
-out: | |
return ret_val; | |
} | |
@@ -1730,7 +2252,7 @@ | |
{ | |
u32 extcnf_ctrl; | |
- if (hw->mac.type != e1000_pch2lan) | |
+ if (hw->mac.type < e1000_pch2lan) | |
return; | |
extcnf_ctrl = er32(EXTCNF_CTRL); | |
@@ -1741,7 +2263,6 @@ | |
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; | |
ew32(EXTCNF_CTRL, extcnf_ctrl); | |
- return; | |
} | |
/** | |
@@ -1759,11 +2280,10 @@ | |
do { | |
data = er32(STATUS); | |
data &= E1000_STATUS_LAN_INIT_DONE; | |
- udelay(100); | |
+ usleep_range(100, 200); | |
} while ((!data) && --loop); | |
- /* | |
- * If basic configuration is incomplete before the above loop | |
+ /* If basic configuration is incomplete before the above loop | |
* count reaches 0, loading the configuration from NVM will | |
* leave the PHY in a bad state possibly resulting in no link. | |
*/ | |
@@ -1785,8 +2305,8 @@ | |
s32 ret_val = 0; | |
u16 reg; | |
- if (e1000_check_reset_block(hw)) | |
- goto out; | |
+ if (hw->phy.ops.check_reset_block(hw)) | |
+ return 0; | |
/* Allow time for h/w to get to quiescent state after reset */ | |
usleep_range(10000, 20000); | |
@@ -1796,12 +2316,12 @@ | |
case e1000_pchlan: | |
ret_val = e1000_hv_phy_workarounds_ich8lan(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
break; | |
case e1000_pch2lan: | |
ret_val = e1000_lv_phy_workarounds_ich8lan(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
break; | |
default: | |
break; | |
@@ -1817,7 +2337,7 @@ | |
/* Configure the LCD with the extended configuration region in NVM */ | |
ret_val = e1000_sw_lcd_config_ich8lan(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* Configure the LCD with the OEM bits in NVM */ | |
ret_val = e1000_oem_bits_config_ich8lan(hw, true); | |
@@ -1832,18 +2352,13 @@ | |
/* Set EEE LPI Update Timer to 200usec */ | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, | |
- I82579_LPI_UPDATE_TIMER); | |
- if (ret_val) | |
- goto release; | |
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, | |
- 0x1387); | |
-release: | |
+ return ret_val; | |
+ ret_val = e1000_write_emi_reg_locked(hw, | |
+ I82579_LPI_UPDATE_TIMER, | |
+ 0x1387); | |
hw->phy.ops.release(hw); | |
} | |
-out: | |
return ret_val; | |
} | |
@@ -1866,12 +2381,9 @@ | |
ret_val = e1000e_phy_hw_reset_generic(hw); | |
if (ret_val) | |
- goto out; | |
- | |
- ret_val = e1000_post_phy_reset_ich8lan(hw); | |
+ return ret_val; | |
-out: | |
- return ret_val; | |
+ return e1000_post_phy_reset_ich8lan(hw); | |
} | |
/** | |
@@ -1887,23 +2399,22 @@ | |
**/ | |
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) | |
{ | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
u16 oem_reg; | |
ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (active) | |
oem_reg |= HV_OEM_BITS_LPLU; | |
else | |
oem_reg &= ~HV_OEM_BITS_LPLU; | |
- oem_reg |= HV_OEM_BITS_RESTART_AN; | |
- ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); | |
+ if (!hw->phy.ops.check_reset_block(hw)) | |
+ oem_reg |= HV_OEM_BITS_RESTART_AN; | |
-out: | |
- return ret_val; | |
+ return e1e_wphy(hw, HV_OEM_BITS, oem_reg); | |
} | |
/** | |
@@ -1927,7 +2438,7 @@ | |
u16 data; | |
if (phy->type == e1000_phy_ife) | |
- return ret_val; | |
+ return 0; | |
phy_ctrl = er32(PHY_CTRL); | |
@@ -1938,8 +2449,7 @@ | |
if (phy->type != e1000_phy_igp_3) | |
return 0; | |
- /* | |
- * Call gig speed drop workaround on LPLU before accessing | |
+ /* Call gig speed drop workaround on LPLU before accessing | |
* any PHY registers | |
*/ | |
if (hw->mac.type == e1000_ich8lan) | |
@@ -1947,6 +2457,8 @@ | |
/* When LPLU is enabled, we should disable SmartSpeed */ | |
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); | |
+ if (ret_val) | |
+ return ret_val; | |
data &= ~IGP01E1000_PSCFR_SMART_SPEED; | |
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | |
if (ret_val) | |
@@ -1958,8 +2470,7 @@ | |
if (phy->type != e1000_phy_igp_3) | |
return 0; | |
- /* | |
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
* during Dx states where the power conservation is most | |
* important. During driver activity we should enable | |
* SmartSpeed, so performance is maintained. | |
@@ -2009,7 +2520,7 @@ | |
{ | |
struct e1000_phy_info *phy = &hw->phy; | |
u32 phy_ctrl; | |
- s32 ret_val; | |
+ s32 ret_val = 0; | |
u16 data; | |
phy_ctrl = er32(PHY_CTRL); | |
@@ -2021,8 +2532,7 @@ | |
if (phy->type != e1000_phy_igp_3) | |
return 0; | |
- /* | |
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
* during Dx states where the power conservation is most | |
* important. During driver activity we should enable | |
* SmartSpeed, so performance is maintained. | |
@@ -2059,8 +2569,7 @@ | |
if (phy->type != e1000_phy_igp_3) | |
return 0; | |
- /* | |
- * Call gig speed drop workaround on LPLU before accessing | |
+ /* Call gig speed drop workaround on LPLU before accessing | |
* any PHY registers | |
*/ | |
if (hw->mac.type == e1000_ich8lan) | |
@@ -2075,7 +2584,7 @@ | |
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | |
} | |
- return 0; | |
+ return ret_val; | |
} | |
/** | |
@@ -2093,7 +2602,7 @@ | |
u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); | |
u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; | |
u8 sig_byte = 0; | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
switch (hw->mac.type) { | |
case e1000_ich8lan: | |
@@ -2108,8 +2617,7 @@ | |
return 0; | |
} | |
- e_dbg("Unable to determine valid NVM bank via EEC - " | |
- "reading flash signature\n"); | |
+ e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); | |
/* fall-thru */ | |
default: | |
/* set bank to 0 in case flash read fails */ | |
@@ -2117,7 +2625,7 @@ | |
/* Check bank 0 */ | |
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, | |
- &sig_byte); | |
+ &sig_byte); | |
if (ret_val) | |
return ret_val; | |
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == | |
@@ -2128,8 +2636,8 @@ | |
/* Check bank 1 */ | |
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + | |
- bank1_offset, | |
- &sig_byte); | |
+ bank1_offset, | |
+ &sig_byte); | |
if (ret_val) | |
return ret_val; | |
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == | |
@@ -2141,8 +2649,6 @@ | |
e_dbg("ERROR: No valid NVM bank present\n"); | |
return -E1000_ERR_NVM; | |
} | |
- | |
- return 0; | |
} | |
/** | |
@@ -2184,8 +2690,8 @@ | |
ret_val = 0; | |
for (i = 0; i < words; i++) { | |
- if (dev_spec->shadow_ram[offset+i].modified) { | |
- data[i] = dev_spec->shadow_ram[offset+i].value; | |
+ if (dev_spec->shadow_ram[offset + i].modified) { | |
+ data[i] = dev_spec->shadow_ram[offset + i].value; | |
} else { | |
ret_val = e1000_read_flash_word_ich8lan(hw, | |
act_offset + i, | |
@@ -2220,9 +2726,8 @@ | |
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
/* Check if the flash descriptor is valid */ | |
- if (hsfsts.hsf_status.fldesvalid == 0) { | |
- e_dbg("Flash descriptor invalid. " | |
- "SW Sequencing must be used.\n"); | |
+ if (!hsfsts.hsf_status.fldesvalid) { | |
+ e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); | |
return -E1000_ERR_NVM; | |
} | |
@@ -2232,8 +2737,7 @@ | |
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | |
- /* | |
- * Either we should have a hardware SPI cycle in progress | |
+ /* Either we should have a hardware SPI cycle in progress | |
* bit to check against, in order to start a new cycle or | |
* FDONE bit should be changed in the hardware so that it | |
* is 1 after hardware reset, which can then be used as an | |
@@ -2241,9 +2745,8 @@ | |
* completed. | |
*/ | |
- if (hsfsts.hsf_status.flcinprog == 0) { | |
- /* | |
- * There is no cycle running at present, | |
+ if (!hsfsts.hsf_status.flcinprog) { | |
+ /* There is no cycle running at present, | |
* so we can start a cycle. | |
* Begin by setting Flash Cycle Done. | |
*/ | |
@@ -2251,23 +2754,21 @@ | |
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | |
ret_val = 0; | |
} else { | |
- s32 i = 0; | |
+ s32 i; | |
- /* | |
- * Otherwise poll for sometime so the current | |
+ /* Otherwise poll for sometime so the current | |
* cycle has a chance to end before giving up. | |
*/ | |
for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { | |
- hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); | |
- if (hsfsts.hsf_status.flcinprog == 0) { | |
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
+ if (!hsfsts.hsf_status.flcinprog) { | |
ret_val = 0; | |
break; | |
} | |
udelay(1); | |
} | |
- if (ret_val == 0) { | |
- /* | |
- * Successful in waiting for previous cycle to timeout, | |
+ if (!ret_val) { | |
+ /* Successful in waiting for previous cycle to timeout, | |
* now set the Flash Cycle Done. | |
*/ | |
hsfsts.hsf_status.flcdone = 1; | |
@@ -2291,7 +2792,6 @@ | |
{ | |
union ich8_hws_flash_ctrl hsflctl; | |
union ich8_hws_flash_status hsfsts; | |
- s32 ret_val = -E1000_ERR_NVM; | |
u32 i = 0; | |
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | |
@@ -2302,15 +2802,15 @@ | |
/* wait till FDONE bit is set to 1 */ | |
do { | |
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
- if (hsfsts.hsf_status.flcdone == 1) | |
+ if (hsfsts.hsf_status.flcdone) | |
break; | |
udelay(1); | |
} while (i++ < timeout); | |
- if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) | |
+ if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) | |
return 0; | |
- return ret_val; | |
+ return -E1000_ERR_NVM; | |
} | |
/** | |
@@ -2373,17 +2873,17 @@ | |
s32 ret_val = -E1000_ERR_NVM; | |
u8 count = 0; | |
- if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) | |
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) | |
return -E1000_ERR_NVM; | |
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + | |
- hw->nvm.flash_base_addr; | |
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + | |
+ hw->nvm.flash_base_addr); | |
do { | |
udelay(1); | |
/* Steps */ | |
ret_val = e1000_flash_cycle_init_ich8lan(hw); | |
- if (ret_val != 0) | |
+ if (ret_val) | |
break; | |
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | |
@@ -2394,16 +2894,16 @@ | |
ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | |
- ret_val = e1000_flash_cycle_ich8lan(hw, | |
- ICH_FLASH_READ_COMMAND_TIMEOUT); | |
+ ret_val = | |
+ e1000_flash_cycle_ich8lan(hw, | |
+ ICH_FLASH_READ_COMMAND_TIMEOUT); | |
- /* | |
- * Check if FCERR is set to 1, if set to 1, clear it | |
+ /* Check if FCERR is set to 1, if set to 1, clear it | |
* and try the whole sequence a few more times, else | |
* read in (shift in) the Flash Data0, the order is | |
* least significant byte first msb to lsb | |
*/ | |
- if (ret_val == 0) { | |
+ if (!ret_val) { | |
flash_data = er32flash(ICH_FLASH_FDATA0); | |
if (size == 1) | |
*data = (u8)(flash_data & 0x000000FF); | |
@@ -2411,19 +2911,17 @@ | |
*data = (u16)(flash_data & 0x0000FFFF); | |
break; | |
} else { | |
- /* | |
- * If we've gotten here, then things are probably | |
+ /* If we've gotten here, then things are probably | |
* completely hosed, but if the error condition is | |
* detected, it won't hurt to give it another try... | |
* ICH_FLASH_CYCLE_REPEAT_COUNT times. | |
*/ | |
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
- if (hsfsts.hsf_status.flcerr == 1) { | |
+ if (hsfsts.hsf_status.flcerr) { | |
/* Repeat for some time before giving up. */ | |
continue; | |
- } else if (hsfsts.hsf_status.flcdone == 0) { | |
- e_dbg("Timeout error - flash cycle " | |
- "did not complete.\n"); | |
+ } else if (!hsfsts.hsf_status.flcdone) { | |
+ e_dbg("Timeout error - flash cycle did not complete.\n"); | |
break; | |
} | |
} | |
@@ -2457,8 +2955,8 @@ | |
nvm->ops.acquire(hw); | |
for (i = 0; i < words; i++) { | |
- dev_spec->shadow_ram[offset+i].modified = true; | |
- dev_spec->shadow_ram[offset+i].value = data[i]; | |
+ dev_spec->shadow_ram[offset + i].modified = true; | |
+ dev_spec->shadow_ram[offset + i].value = data[i]; | |
} | |
nvm->ops.release(hw); | |
@@ -2494,12 +2992,11 @@ | |
nvm->ops.acquire(hw); | |
- /* | |
- * We're writing to the opposite bank so if we're on bank 1, | |
+ /* We're writing to the opposite bank so if we're on bank 1, | |
* write to bank 0 etc. We also need to erase the segment that | |
* is going to be written | |
*/ | |
- ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | |
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | |
if (ret_val) { | |
e_dbg("Could not detect valid bank, assuming bank 0\n"); | |
bank = 0; | |
@@ -2520,8 +3017,7 @@ | |
} | |
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | |
- /* | |
- * Determine whether to write the value stored | |
+ /* Determine whether to write the value stored | |
* in the other NVM bank or a modified value stored | |
* in the shadow RAM | |
*/ | |
@@ -2529,14 +3025,13 @@ | |
data = dev_spec->shadow_ram[i].value; | |
} else { | |
ret_val = e1000_read_flash_word_ich8lan(hw, i + | |
- old_bank_offset, | |
- &data); | |
+ old_bank_offset, | |
+ &data); | |
if (ret_val) | |
break; | |
} | |
- /* | |
- * If the word is 0x13, then make sure the signature bits | |
+ /* If the word is 0x13, then make sure the signature bits | |
* (15:14) are 11b until the commit has completed. | |
* This will allow us to write 10b which indicates the | |
* signature is valid. We want to do this after the write | |
@@ -2549,7 +3044,7 @@ | |
/* Convert offset to bytes. */ | |
act_offset = (i + new_bank_offset) << 1; | |
- udelay(100); | |
+ usleep_range(100, 200); | |
/* Write the bytes to the new bank. */ | |
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, | |
act_offset, | |
@@ -2557,16 +3052,15 @@ | |
if (ret_val) | |
break; | |
- udelay(100); | |
+ usleep_range(100, 200); | |
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, | |
- act_offset + 1, | |
- (u8)(data >> 8)); | |
+ act_offset + 1, | |
+ (u8)(data >> 8)); | |
if (ret_val) | |
break; | |
} | |
- /* | |
- * Don't bother writing the segment valid bits if sector | |
+ /* Don't bother writing the segment valid bits if sector | |
* programming failed. | |
*/ | |
if (ret_val) { | |
@@ -2575,8 +3069,7 @@ | |
goto release; | |
} | |
- /* | |
- * Finally validate the new segment by setting bit 15:14 | |
+ /* Finally validate the new segment by setting bit 15:14 | |
* to 10b in word 0x13 , this can be done without an | |
* erase as well since these bits are 11 to start with | |
* and we need to change bit 14 to 0b | |
@@ -2593,8 +3086,7 @@ | |
if (ret_val) | |
goto release; | |
- /* | |
- * And invalidate the previously valid segment by setting | |
+ /* And invalidate the previously valid segment by setting | |
* its signature word (0x13) high_byte to 0b. This can be | |
* done without an erase because flash erase sets all bits | |
* to 1's. We can write 1's to 0's without an erase | |
@@ -2613,12 +3105,11 @@ | |
release: | |
nvm->ops.release(hw); | |
- /* | |
- * Reload the EEPROM, or else modifications will not appear | |
+ /* Reload the EEPROM, or else modifications will not appear | |
* until after the next adapter reset. | |
*/ | |
if (!ret_val) { | |
- e1000e_reload_nvm(hw); | |
+ nvm->ops.reload(hw); | |
usleep_range(10000, 20000); | |
} | |
@@ -2641,20 +3132,32 @@ | |
{ | |
s32 ret_val; | |
u16 data; | |
+ u16 word; | |
+ u16 valid_csum_mask; | |
- /* | |
- * Read 0x19 and check bit 6. If this bit is 0, the checksum | |
- * needs to be fixed. This bit is an indication that the NVM | |
- * was prepared by OEM software and did not calculate the | |
- * checksum...a likely scenario. | |
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, | |
+ * the checksum needs to be fixed. This bit is an indication that | |
+ * the NVM was prepared by OEM software and did not calculate | |
+ * the checksum...a likely scenario. | |
*/ | |
- ret_val = e1000_read_nvm(hw, 0x19, 1, &data); | |
+ switch (hw->mac.type) { | |
+ case e1000_pch_lpt: | |
+ word = NVM_COMPAT; | |
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM; | |
+ break; | |
+ default: | |
+ word = NVM_FUTURE_INIT_WORD1; | |
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; | |
+ break; | |
+ } | |
+ | |
+ ret_val = e1000_read_nvm(hw, word, 1, &data); | |
if (ret_val) | |
return ret_val; | |
- if ((data & 0x40) == 0) { | |
- data |= 0x40; | |
- ret_val = e1000_write_nvm(hw, 0x19, 1, &data); | |
+ if (!(data & valid_csum_mask)) { | |
+ data |= valid_csum_mask; | |
+ ret_val = e1000_write_nvm(hw, word, 1, &data); | |
if (ret_val) | |
return ret_val; | |
ret_val = e1000e_update_nvm_checksum(hw); | |
@@ -2693,8 +3196,7 @@ | |
pr0.range.wpe = true; | |
ew32flash(ICH_FLASH_PR0, pr0.regval); | |
- /* | |
- * Lock down a subset of GbE Flash Control Registers, e.g. | |
+ /* Lock down a subset of GbE Flash Control Registers, e.g. | |
* PR0 to prevent the write-protection from being lifted. | |
* Once FLOCKDN is set, the registers protected by it cannot | |
* be written until FLOCKDN is cleared by a hardware reset. | |
@@ -2729,8 +3231,8 @@ | |
offset > ICH_FLASH_LINEAR_ADDR_MASK) | |
return -E1000_ERR_NVM; | |
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + | |
- hw->nvm.flash_base_addr; | |
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + | |
+ hw->nvm.flash_base_addr); | |
do { | |
udelay(1); | |
@@ -2741,7 +3243,7 @@ | |
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | |
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | |
- hsflctl.hsf_ctrl.fldbcount = size -1; | |
+ hsflctl.hsf_ctrl.fldbcount = size - 1; | |
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; | |
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | |
@@ -2754,28 +3256,26 @@ | |
ew32flash(ICH_FLASH_FDATA0, flash_data); | |
- /* | |
- * check if FCERR is set to 1 , if set to 1, clear it | |
+ /* check if FCERR is set to 1 , if set to 1, clear it | |
* and try the whole sequence a few more times else done | |
*/ | |
- ret_val = e1000_flash_cycle_ich8lan(hw, | |
- ICH_FLASH_WRITE_COMMAND_TIMEOUT); | |
+ ret_val = | |
+ e1000_flash_cycle_ich8lan(hw, | |
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT); | |
if (!ret_val) | |
break; | |
- /* | |
- * If we're here, then things are most likely | |
+ /* If we're here, then things are most likely | |
* completely hosed, but if the error condition | |
* is detected, it won't hurt to give it another | |
* try...ICH_FLASH_CYCLE_REPEAT_COUNT times. | |
*/ | |
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
- if (hsfsts.hsf_status.flcerr == 1) | |
+ if (hsfsts.hsf_status.flcerr) | |
/* Repeat for some time before giving up. */ | |
continue; | |
- if (hsfsts.hsf_status.flcdone == 0) { | |
- e_dbg("Timeout error - flash cycle " | |
- "did not complete."); | |
+ if (!hsfsts.hsf_status.flcdone) { | |
+ e_dbg("Timeout error - flash cycle did not complete.\n"); | |
break; | |
} | |
} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); | |
@@ -2820,7 +3320,7 @@ | |
for (program_retries = 0; program_retries < 100; program_retries++) { | |
e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); | |
- udelay(100); | |
+ usleep_range(100, 200); | |
ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | |
if (!ret_val) | |
break; | |
@@ -2853,8 +3353,7 @@ | |
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
- /* | |
- * Determine HW Sector size: Read BERASE bits of hw flash status | |
+ /* Determine HW Sector size: Read BERASE bits of hw flash status | |
* register | |
* 00: The Hw sector is 256 bytes, hence we need to erase 16 | |
* consecutive sectors. The start index for the nth Hw sector | |
@@ -2892,44 +3391,42 @@ | |
flash_linear_addr = hw->nvm.flash_base_addr; | |
flash_linear_addr += (bank) ? flash_bank_size : 0; | |
- for (j = 0; j < iteration ; j++) { | |
+ for (j = 0; j < iteration; j++) { | |
do { | |
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; | |
+ | |
/* Steps */ | |
ret_val = e1000_flash_cycle_init_ich8lan(hw); | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Write a value 11 (block Erase) in Flash | |
+ /* Write a value 11 (block Erase) in Flash | |
* Cycle field in hw flash control | |
*/ | |
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | |
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; | |
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | |
- /* | |
- * Write the last 24 bits of an index within the | |
+ /* Write the last 24 bits of an index within the | |
* block into Flash Linear address field in Flash | |
* Address. | |
*/ | |
flash_linear_addr += (j * sector_size); | |
ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | |
- ret_val = e1000_flash_cycle_ich8lan(hw, | |
- ICH_FLASH_ERASE_COMMAND_TIMEOUT); | |
- if (ret_val == 0) | |
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout); | |
+ if (!ret_val) | |
break; | |
- /* | |
- * Check if FCERR is set to 1. If 1, | |
+ /* Check if FCERR is set to 1. If 1, | |
* clear it and try the whole sequence | |
* a few more times else Done | |
*/ | |
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | |
- if (hsfsts.hsf_status.flcerr == 1) | |
+ if (hsfsts.hsf_status.flcerr) | |
/* repeat for some time before giving up */ | |
continue; | |
- else if (hsfsts.hsf_status.flcdone == 0) | |
+ else if (!hsfsts.hsf_status.flcdone) | |
return ret_val; | |
} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); | |
} | |
@@ -2956,8 +3453,7 @@ | |
return ret_val; | |
} | |
- if (*data == ID_LED_RESERVED_0000 || | |
- *data == ID_LED_RESERVED_FFFF) | |
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) | |
*data = ID_LED_DEFAULT_ICH8LAN; | |
return 0; | |
@@ -2972,7 +3468,7 @@ | |
* | |
* PCH also does not have an "always on" or "always off" mode which | |
* complicates the ID feature. Instead of using the "on" mode to indicate | |
- * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()), | |
+ * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), | |
* use "link_up" mode. The LEDs will still ID on request if there is no | |
* link based on logic in e1000_led_[on|off]_pchlan(). | |
**/ | |
@@ -2987,7 +3483,7 @@ | |
/* Get default ID LED modes */ | |
ret_val = hw->nvm.ops.valid_led_default(hw, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
mac->ledctl_default = er32(LEDCTL); | |
mac->ledctl_mode1 = mac->ledctl_default; | |
@@ -3032,8 +3528,7 @@ | |
} | |
} | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -3050,8 +3545,7 @@ | |
ret_val = e1000e_get_bus_info_pcie(hw); | |
- /* | |
- * ICH devices are "PCI Express"-ish. They have | |
+ /* ICH devices are "PCI Express"-ish. They have | |
* a configuration space, but do not contain | |
* PCI Express Capability registers, so bus width | |
* must be hardcoded. | |
@@ -3072,12 +3566,11 @@ | |
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |
{ | |
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | |
- u16 reg; | |
- u32 ctrl, kab; | |
+ u16 kum_cfg; | |
+ u32 ctrl, reg; | |
s32 ret_val; | |
- /* | |
- * Prevent the PCI-E bus from sticking if there is no TLP connection | |
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection | |
* on the last TLP read/write transaction when MAC is reset. | |
*/ | |
ret_val = e1000e_disable_pcie_master(hw); | |
@@ -3087,8 +3580,7 @@ | |
e_dbg("Masking off all interrupts\n"); | |
ew32(IMC, 0xffffffff); | |
- /* | |
- * Disable the Transmit and Receive units. Then delay to allow | |
+ /* Disable the Transmit and Receive units. Then delay to allow | |
* any pending transactions to complete before we hit the MAC | |
* with the global reset. | |
*/ | |
@@ -3107,12 +3599,12 @@ | |
} | |
if (hw->mac.type == e1000_pchlan) { | |
- /* Save the NVM K1 bit setting*/ | |
- ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); | |
+ /* Save the NVM K1 bit setting */ | |
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); | |
if (ret_val) | |
return ret_val; | |
- if (reg & E1000_NVM_K1_ENABLE) | |
+ if (kum_cfg & E1000_NVM_K1_ENABLE) | |
dev_spec->nvm_k1_enabled = true; | |
else | |
dev_spec->nvm_k1_enabled = false; | |
@@ -3120,16 +3612,14 @@ | |
ctrl = er32(CTRL); | |
- if (!e1000_check_reset_block(hw)) { | |
- /* | |
- * Full-chip reset requires MAC and PHY reset at the same | |
+ if (!hw->phy.ops.check_reset_block(hw)) { | |
+ /* Full-chip reset requires MAC and PHY reset at the same | |
* time to make sure the interface between MAC and the | |
* external PHY is reset. | |
*/ | |
ctrl |= E1000_CTRL_PHY_RST; | |
- /* | |
- * Gate automatic PHY configuration by hardware on | |
+ /* Gate automatic PHY configuration by hardware on | |
* non-managed 82579 | |
*/ | |
if ((hw->mac.type == e1000_pch2lan) && | |
@@ -3142,21 +3632,28 @@ | |
/* cannot issue a flush here because it hangs the hardware */ | |
msleep(20); | |
+ /* Set Phy Config Counter to 50msec */ | |
+ if (hw->mac.type == e1000_pch2lan) { | |
+ reg = er32(FEXTNVM3); | |
+ reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; | |
+ reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; | |
+ ew32(FEXTNVM3, reg); | |
+ } | |
+ | |
if (!ret_val) | |
clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); | |
if (ctrl & E1000_CTRL_PHY_RST) { | |
ret_val = hw->phy.ops.get_cfg_done(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1000_post_phy_reset_ich8lan(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
- /* | |
- * For PCH, this write will make sure that any noise | |
+ /* For PCH, this write will make sure that any noise | |
* will be detected as a CRC error and be dropped rather than show up | |
* as a bad packet to the DMA engine. | |
*/ | |
@@ -3166,12 +3663,11 @@ | |
ew32(IMC, 0xffffffff); | |
er32(ICR); | |
- kab = er32(KABGTXD); | |
- kab |= E1000_KABGTXD_BGSQLBIAS; | |
- ew32(KABGTXD, kab); | |
+ reg = er32(KABGTXD); | |
+ reg |= E1000_KABGTXD_BGSQLBIAS; | |
+ ew32(KABGTXD, reg); | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -3197,9 +3693,9 @@ | |
/* Initialize identification LED */ | |
ret_val = mac->ops.id_led_init(hw); | |
+ /* An error is not fatal and we should not stop init due to this */ | |
if (ret_val) | |
e_dbg("Error initializing identification LED\n"); | |
- /* This is not fatal and we should not stop init due to this */ | |
/* Setup the receive address. */ | |
e1000e_init_rx_addrs(hw, mac->rar_entry_count); | |
@@ -3209,8 +3705,7 @@ | |
for (i = 0; i < mac->mta_reg_count; i++) | |
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | |
- /* | |
- * The 82578 Rx buffer will stall if wakeup is enabled in host and | |
+ /* The 82578 Rx buffer will stall if wakeup is enabled in host and | |
* the ME. Disable wakeup by clearing the host wakeup bit. | |
* Reset the phy after disabling host wakeup to reset the Rx buffer. | |
*/ | |
@@ -3224,46 +3719,45 @@ | |
} | |
/* Setup link and flow control */ | |
- ret_val = e1000_setup_link_ich8lan(hw); | |
+ ret_val = mac->ops.setup_link(hw); | |
/* Set the transmit descriptor write-back policy for both queues */ | |
txdctl = er32(TXDCTL(0)); | |
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | | |
- E1000_TXDCTL_FULL_TX_DESC_WB; | |
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | | |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | |
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | | |
+ E1000_TXDCTL_FULL_TX_DESC_WB); | |
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | | |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH); | |
ew32(TXDCTL(0), txdctl); | |
txdctl = er32(TXDCTL(1)); | |
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | | |
- E1000_TXDCTL_FULL_TX_DESC_WB; | |
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | | |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | |
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | | |
+ E1000_TXDCTL_FULL_TX_DESC_WB); | |
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | | |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH); | |
ew32(TXDCTL(1), txdctl); | |
- /* | |
- * ICH8 has opposite polarity of no_snoop bits. | |
+ /* ICH8 has opposite polarity of no_snoop bits. | |
* By default, we should use snoop behavior. | |
*/ | |
if (mac->type == e1000_ich8lan) | |
snoop = PCIE_ICH8_SNOOP_ALL; | |
else | |
- snoop = (u32) ~(PCIE_NO_SNOOP_ALL); | |
+ snoop = (u32)~(PCIE_NO_SNOOP_ALL); | |
e1000e_set_pcie_no_snoop(hw, snoop); | |
ctrl_ext = er32(CTRL_EXT); | |
ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | |
ew32(CTRL_EXT, ctrl_ext); | |
- /* | |
- * Clear all of the statistics registers (clear on read). It is | |
+ /* Clear all of the statistics registers (clear on read). It is | |
* important that we do this after we have tried to establish link | |
* because the symbol error count will increment wildly if there | |
* is no link. | |
*/ | |
e1000_clear_hw_cntrs_ich8lan(hw); | |
- return 0; | |
+ return ret_val; | |
} | |
+ | |
/** | |
* e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits | |
* @hw: pointer to the HW structure | |
@@ -3316,13 +3810,29 @@ | |
ew32(STATUS, reg); | |
} | |
- /* | |
- * work-around descriptor data corruption issue during nfs v2 udp | |
+ /* work-around descriptor data corruption issue during nfs v2 udp | |
* traffic, just disable the nfs filtering capability | |
*/ | |
reg = er32(RFCTL); | |
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); | |
+ | |
+ /* Disable IPv6 extension header parsing because some malformed | |
+ * IPv6 headers can hang the Rx. | |
+ */ | |
+ if (hw->mac.type == e1000_ich8lan) | |
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); | |
ew32(RFCTL, reg); | |
+ | |
+ /* Enable ECC on Lynxpoint */ | |
+ if (hw->mac.type == e1000_pch_lpt) { | |
+ reg = er32(PBECCSTS); | |
+ reg |= E1000_PBECCSTS_ECC_ENABLE; | |
+ ew32(PBECCSTS, reg); | |
+ | |
+ reg = er32(CTRL); | |
+ reg |= E1000_CTRL_MEHE; | |
+ ew32(CTRL, reg); | |
+ } | |
} | |
/** | |
@@ -3339,11 +3849,10 @@ | |
{ | |
s32 ret_val; | |
- if (e1000_check_reset_block(hw)) | |
+ if (hw->phy.ops.check_reset_block(hw)) | |
return 0; | |
- /* | |
- * ICH parts do not have a word in the NVM to determine | |
+ /* ICH parts do not have a word in the NVM to determine | |
* the default flow control setting, so we explicitly | |
* set it to full. | |
*/ | |
@@ -3355,23 +3864,22 @@ | |
hw->fc.requested_mode = e1000_fc_full; | |
} | |
- /* | |
- * Save off the requested flow control mode for use later. Depending | |
+ /* Save off the requested flow control mode for use later. Depending | |
* on the link partner's capabilities, we may or may not use this mode. | |
*/ | |
hw->fc.current_mode = hw->fc.requested_mode; | |
- e_dbg("After fix-ups FlowControl is now = %x\n", | |
- hw->fc.current_mode); | |
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); | |
/* Continue to configure the copper link. */ | |
- ret_val = e1000_setup_copper_link_ich8lan(hw); | |
+ ret_val = hw->mac.ops.setup_physical_interface(hw); | |
if (ret_val) | |
return ret_val; | |
ew32(FCTTV, hw->fc.pause_time); | |
if ((hw->phy.type == e1000_phy_82578) || | |
(hw->phy.type == e1000_phy_82579) || | |
+ (hw->phy.type == e1000_phy_i217) || | |
(hw->phy.type == e1000_phy_82577)) { | |
ew32(FCRTV_PCH, hw->fc.refresh_time); | |
@@ -3403,8 +3911,7 @@ | |
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | |
ew32(CTRL, ctrl); | |
- /* | |
- * Set the mac to wait the maximum time between each iteration | |
+ /* Set the mac to wait the maximum time between each iteration | |
* and increase the max iterations when polling the phy; | |
* this fixes erroneous timeouts at 10Mbps. | |
*/ | |
@@ -3412,12 +3919,12 @@ | |
if (ret_val) | |
return ret_val; | |
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | |
- ®_data); | |
+ ®_data); | |
if (ret_val) | |
return ret_val; | |
reg_data |= 0x3F; | |
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | |
- reg_data); | |
+ reg_data); | |
if (ret_val) | |
return ret_val; | |
@@ -3465,6 +3972,32 @@ | |
default: | |
break; | |
} | |
+ | |
+ return e1000e_setup_copper_link(hw); | |
+} | |
+ | |
+/** | |
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface | |
+ * @hw: pointer to the HW structure | |
+ * | |
+ * Calls the PHY specific link setup function and then calls the | |
+ * generic setup_copper_link to finish configuring the link for | |
+ * Lynxpoint PCH devices | |
+ **/ | |
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) | |
+{ | |
+ u32 ctrl; | |
+ s32 ret_val; | |
+ | |
+ ctrl = er32(CTRL); | |
+ ctrl |= E1000_CTRL_SLU; | |
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | |
+ ew32(CTRL, ctrl); | |
+ | |
+ ret_val = e1000_copper_link_setup_82577(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
return e1000e_setup_copper_link(hw); | |
} | |
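The new e1000_setup_copper_link_pch_lpt() above is a plain read-modify-write on CTRL: set the set-link-up bit, clear the forced speed/duplex bits so autonegotiation decides, write the register back, then fall through to the shared copper-link setup. A rough sketch of that register pattern with made-up bit values (the real E1000_CTRL_* masks live in the driver headers):

#include <stdint.h>

#define DEMO_CTRL_SLU     (1u << 6)	/* hypothetical "set link up" bit */
#define DEMO_CTRL_FRCSPD  (1u << 11)	/* hypothetical force-speed bit */
#define DEMO_CTRL_FRCDPX  (1u << 12)	/* hypothetical force-duplex bit */

static void demo_force_link_up(volatile uint32_t *ctrl_reg)
{
	uint32_t ctrl = *ctrl_reg;				/* read */

	ctrl |= DEMO_CTRL_SLU;					/* modify: request link up */
	ctrl &= ~(DEMO_CTRL_FRCSPD | DEMO_CTRL_FRCDPX);		/* let autoneg pick speed/duplex */
	*ctrl_reg = ctrl;					/* write back */
}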
@@ -3488,8 +4021,7 @@ | |
return ret_val; | |
if ((hw->mac.type == e1000_ich8lan) && | |
- (hw->phy.type == e1000_phy_igp_3) && | |
- (*speed == SPEED_1000)) { | |
+ (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { | |
ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); | |
} | |
@@ -3522,8 +4054,7 @@ | |
if (!dev_spec->kmrn_lock_loss_workaround_enabled) | |
return 0; | |
- /* | |
- * Make sure link is up before proceeding. If not just return. | |
+ /* Make sure link is up before proceeding. If not just return. | |
* Attempting this while link is negotiating fouled up link | |
* stability | |
*/ | |
@@ -3555,8 +4086,7 @@ | |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | |
ew32(PHY_CTRL, phy_ctrl); | |
- /* | |
- * Call gig speed drop workaround on Gig disable before accessing | |
+ /* Call gig speed drop workaround on Gig disable before accessing | |
* any PHY registers | |
*/ | |
e1000e_gig_downshift_workaround_ich8lan(hw); | |
@@ -3566,7 +4096,7 @@ | |
} | |
/** | |
- * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state | |
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state | |
* @hw: pointer to the HW structure | |
* @state: boolean value used to set the current Kumeran workaround state | |
* | |
@@ -3574,7 +4104,7 @@ | |
* /disabled - false). | |
**/ | |
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | |
- bool state) | |
+ bool state) | |
{ | |
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | |
@@ -3600,7 +4130,7 @@ | |
{ | |
u32 reg; | |
u16 data; | |
- u8 retry = 0; | |
+ u8 retry = 0; | |
if (hw->phy.type != e1000_phy_igp_3) | |
return; | |
@@ -3613,8 +4143,7 @@ | |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | |
ew32(PHY_CTRL, reg); | |
- /* | |
- * Call gig speed drop workaround on Gig disable before | |
+ /* Call gig speed drop workaround on Gig disable before | |
* accessing any PHY registers | |
*/ | |
if (hw->mac.type == e1000_ich8lan) | |
@@ -3657,17 +4186,16 @@ | |
return; | |
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, | |
- ®_data); | |
+ ®_data); | |
if (ret_val) | |
return; | |
reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; | |
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, | |
- reg_data); | |
+ reg_data); | |
if (ret_val) | |
return; | |
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; | |
- ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, | |
- reg_data); | |
+ e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); | |
} | |
/** | |
@@ -3676,17 +4204,98 @@ | |
* | |
* During S0 to Sx transition, it is possible the link remains at gig | |
* instead of negotiating to a lower speed. Before going to Sx, set | |
- * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation | |
- * to a lower speed. For PCH and newer parts, the OEM bits PHY register | |
- * (LED, GbE disable and LPLU configurations) also needs to be written. | |
+ * 'Gig Disable' to force link speed negotiation to a lower speed based on | |
+ * the LPLU setting in the NVM or custom setting. For PCH and newer parts, | |
+ * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also | |
+ * needs to be written. | |
+ * Parts that support (and are linked to a partner which support) EEE in | |
+ * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power | |
+ * than 10Mbps w/o EEE. | |
**/ | |
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) | |
{ | |
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | |
u32 phy_ctrl; | |
s32 ret_val; | |
phy_ctrl = er32(PHY_CTRL); | |
- phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; | |
+ phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; | |
+ | |
+ if (hw->phy.type == e1000_phy_i217) { | |
+ u16 phy_reg, device_id = hw->adapter->pdev->device; | |
+ | |
+ if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || | |
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || | |
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) || | |
+ (device_id == E1000_DEV_ID_PCH_I218_V3)) { | |
+ u32 fextnvm6 = er32(FEXTNVM6); | |
+ | |
+ ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); | |
+ } | |
+ | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) | |
+ goto out; | |
+ | |
+ if (!dev_spec->eee_disable) { | |
+ u16 eee_advert; | |
+ | |
+ ret_val = | |
+ e1000_read_emi_reg_locked(hw, | |
+ I217_EEE_ADVERTISEMENT, | |
+ &eee_advert); | |
+ if (ret_val) | |
+ goto release; | |
+ | |
+ /* Disable LPLU if both link partners support 100BaseT | |
+ * EEE and 100Full is advertised on both ends of the | |
+ * link. | |
+ */ | |
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) && | |
+ (dev_spec->eee_lp_ability & | |
+ I82579_EEE_100_SUPPORTED) && | |
+ (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) | |
+ phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | | |
+ E1000_PHY_CTRL_NOND0A_LPLU); | |
+ } | |
+ | |
+ /* For i217 Intel Rapid Start Technology support, | |
+ * when the system is going into Sx and no manageability engine | |
+ * is present, the driver must configure proxy to reset only on | |
+ * power good. LPI (Low Power Idle) state must also reset only | |
+ * on power good, as well as the MTA (Multicast table array). | |
+ * The SMBus release must also be disabled on LCD reset. | |
+ */ | |
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { | |
+ /* Enable proxy to reset only on power good. */ | |
+ e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); | |
+ phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; | |
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); | |
+ | |
+ /* Set bit enable LPI (EEE) to reset only on | |
+ * power good. | |
+ */ | |
+ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); | |
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; | |
+ e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); | |
+ | |
+ /* Disable the SMB release on LCD reset. */ | |
+ e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); | |
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; | |
+ e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); | |
+ } | |
+ | |
+ /* Enable MTA to reset for Intel Rapid Start Technology | |
+ * Support | |
+ */ | |
+ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); | |
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; | |
+ e1e_wphy_locked(hw, I217_CGFREG, phy_reg); | |
+ | |
+release: | |
+ hw->phy.ops.release(hw); | |
+ } | |
+out: | |
ew32(PHY_CTRL, phy_ctrl); | |
if (hw->mac.type == e1000_ich8lan) | |
@@ -3694,7 +4303,11 @@ | |
if (hw->mac.type >= e1000_pchlan) { | |
e1000_oem_bits_config_ich8lan(hw, false); | |
- e1000_phy_hw_reset_ich8lan(hw); | |
+ | |
+ /* Reset PHY to activate OEM bits on 82577/8 */ | |
+ if (hw->mac.type == e1000_pchlan) | |
+ e1000e_phy_hw_reset_generic(hw); | |
+ | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
return; | |
@@ -3711,50 +4324,59 @@ | |
* on which PHY resets are not blocked, if the PHY registers cannot be | |
* accessed properly by the s/w toggle the LANPHYPC value to power cycle | |
* the PHY. | |
+ * On i217, setup Intel Rapid Start Technology. | |
**/ | |
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) | |
{ | |
- u32 fwsm; | |
+ s32 ret_val; | |
- if (hw->mac.type != e1000_pch2lan) | |
+ if (hw->mac.type < e1000_pch2lan) | |
return; | |
- fwsm = er32(FWSM); | |
- if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { | |
- u16 phy_id1, phy_id2; | |
- s32 ret_val; | |
+ ret_val = e1000_init_phy_workarounds_pchlan(hw); | |
+ if (ret_val) { | |
+ e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); | |
+ return; | |
+ } | |
+ | |
+ /* For i217 Intel Rapid Start Technology support when the system | |
+ * is transitioning from Sx and no manageability engine is present | |
+ * configure SMBus to restore on reset, disable proxy, and enable | |
+ * the reset on MTA (Multicast table array). | |
+ */ | |
+ if (hw->phy.type == e1000_phy_i217) { | |
+ u16 phy_reg; | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) { | |
- e_dbg("Failed to acquire PHY semaphore in resume\n"); | |
+ e_dbg("Failed to setup iRST\n"); | |
return; | |
} | |
- /* Test access to the PHY registers by reading the ID regs */ | |
- ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); | |
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { | |
+ /* Restore clear on SMB if no manageability engine | |
+ * is present | |
+ */ | |
+ ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); | |
+ if (ret_val) | |
+ goto release; | |
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; | |
+ e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); | |
+ | |
+ /* Disable Proxy */ | |
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); | |
+ } | |
+ /* Enable reset on MTA */ | |
+ ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); | |
if (ret_val) | |
goto release; | |
- ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); | |
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; | |
+ e1e_wphy_locked(hw, I217_CGFREG, phy_reg); | |
+release: | |
if (ret_val) | |
- goto release; | |
- | |
- if (hw->phy.id == ((u32)(phy_id1 << 16) | | |
- (u32)(phy_id2 & PHY_REVISION_MASK))) | |
- goto release; | |
- | |
- e1000_toggle_lanphypc_value_ich8lan(hw); | |
- | |
+ e_dbg("Error %d in resume workarounds\n", ret_val); | |
hw->phy.ops.release(hw); | |
- msleep(50); | |
- e1000_phy_hw_reset(hw); | |
- msleep(50); | |
- return; | |
} | |
- | |
-release: | |
- hw->phy.ops.release(hw); | |
- | |
- return; | |
} | |
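The rewritten resume path above sticks to the acquire/operate/release shape: take the PHY semaphore, do locked register reads and writes, and make sure the error path still releases it. A kernel-style sketch of that shape with hypothetical demo_* names:

#include <linux/errno.h>

struct demo_phy {
	int (*acquire)(struct demo_phy *phy);
	void (*release)(struct demo_phy *phy);
	int (*read_reg)(struct demo_phy *phy, unsigned int reg, unsigned short *val);
};

static int demo_read_one_locked(struct demo_phy *phy, unsigned int reg,
				unsigned short *val)
{
	int ret;

	ret = phy->acquire(phy);
	if (ret)
		return ret;		/* nothing acquired, nothing to release */

	ret = phy->read_reg(phy, reg, val);

	phy->release(phy);		/* always release once acquired */
	return ret;
}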
/** | |
@@ -3838,8 +4460,7 @@ | |
u16 data = (u16)hw->mac.ledctl_mode2; | |
u32 i, led; | |
- /* | |
- * If no link, then turn LED on by setting the invert bit | |
+ /* If no link, then turn LED on by setting the invert bit | |
* for each LED that's mode is "link_up" in ledctl_mode2. | |
*/ | |
if (!(er32(STATUS) & E1000_STATUS_LU)) { | |
@@ -3869,8 +4490,7 @@ | |
u16 data = (u16)hw->mac.ledctl_mode1; | |
u32 i, led; | |
- /* | |
- * If no link, then turn LED off by clearing the invert bit | |
+ /* If no link, then turn LED off by clearing the invert bit | |
* for each LED that's mode is "link_up" in ledctl_mode1. | |
*/ | |
if (!(er32(STATUS) & E1000_STATUS_LU)) { | |
@@ -3907,7 +4527,7 @@ | |
u32 bank = 0; | |
u32 status; | |
- e1000e_get_cfg_done(hw); | |
+ e1000e_get_cfg_done_generic(hw); | |
/* Wait for indication from h/w that it has completed basic config */ | |
if (hw->mac.type >= e1000_ich10lan) { | |
@@ -3915,8 +4535,7 @@ | |
} else { | |
ret_val = e1000e_get_auto_rd_done(hw); | |
if (ret_val) { | |
- /* | |
- * When auto config read does not complete, do not | |
+ /* When auto config read does not complete, do not | |
* return with an error. This can happen in situations | |
* where there is no eeprom and prevents getting link. | |
*/ | |
@@ -3934,7 +4553,7 @@ | |
/* If EEPROM is not marked present, init the IGP 3 PHY manually */ | |
if (hw->mac.type <= e1000_ich9lan) { | |
- if (((er32(EECD) & E1000_EECD_PRES) == 0) && | |
+ if (!(er32(EECD) & E1000_EECD_PRES) && | |
(hw->phy.type == e1000_phy_igp_3)) { | |
e1000e_phy_init_script_igp3(hw); | |
} | |
@@ -3995,6 +4614,7 @@ | |
/* Clear PHY statistics registers */ | |
if ((hw->phy.type == e1000_phy_82578) || | |
(hw->phy.type == e1000_phy_82579) || | |
+ (hw->phy.type == e1000_phy_i217) || | |
(hw->phy.type == e1000_phy_82577)) { | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
@@ -4023,7 +4643,6 @@ | |
} | |
static const struct e1000_mac_operations ich8_mac_ops = { | |
- .id_led_init = e1000e_id_led_init, | |
/* check_mng_mode dependent on mac type */ | |
.check_for_link = e1000_check_for_copper_link_ich8lan, | |
/* cleanup_led dependent on mac type */ | |
@@ -4037,8 +4656,10 @@ | |
.reset_hw = e1000_reset_hw_ich8lan, | |
.init_hw = e1000_init_hw_ich8lan, | |
.setup_link = e1000_setup_link_ich8lan, | |
- .setup_physical_interface= e1000_setup_copper_link_ich8lan, | |
+ .setup_physical_interface = e1000_setup_copper_link_ich8lan, | |
/* id_led_init dependent on mac type */ | |
+ .config_collision_dist = e1000e_config_collision_dist_generic, | |
+ .rar_set = e1000e_rar_set_generic, | |
}; | |
static const struct e1000_phy_operations ich8_phy_ops = { | |
@@ -4057,8 +4678,9 @@ | |
static const struct e1000_nvm_operations ich8_nvm_ops = { | |
.acquire = e1000_acquire_nvm_ich8lan, | |
- .read = e1000_read_nvm_ich8lan, | |
+ .read = e1000_read_nvm_ich8lan, | |
.release = e1000_release_nvm_ich8lan, | |
+ .reload = e1000e_reload_nvm_generic, | |
.update = e1000_update_nvm_checksum_ich8lan, | |
.valid_led_default = e1000_valid_led_default_ich8lan, | |
.validate = e1000_validate_nvm_checksum_ich8lan, | |
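The mac_ops/phy_ops/nvm_ops tables above are how the driver picks per-family behavior: const structs of function pointers that the e1000_info entries further down hand to the core, with the newer tree simply adding members such as .reload, .rar_set and .config_collision_dist. A small standalone sketch of the pattern using invented demo_* names:

#include <stdio.h>

struct demo_nvm_ops {
	int (*read)(unsigned int offset, unsigned short *data);
	int (*update)(void);
};

static int demo_read(unsigned int offset, unsigned short *data)
{
	(void)offset;			/* unused in this stub */
	*data = 0xffff;			/* pretend the NVM word is erased */
	return 0;
}

static int demo_update(void)
{
	return 0;			/* pretend the checksum update succeeded */
}

static const struct demo_nvm_ops demo_ops = {
	.read   = demo_read,
	.update = demo_update,
};

int main(void)
{
	unsigned short word;

	demo_ops.read(0, &word);
	printf("word 0 = 0x%04x\n", word);
	return demo_ops.update();
}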
@@ -4088,10 +4710,9 @@ | |
| FLAG_HAS_WOL | |
| FLAG_HAS_CTRLEXT_ON_LOAD | |
| FLAG_HAS_AMT | |
- | FLAG_HAS_ERT | |
| FLAG_HAS_FLASH | |
| FLAG_APME_IN_WUC, | |
- .pba = 10, | |
+ .pba = 18, | |
.max_hw_frame_size = DEFAULT_JUMBO, | |
.get_variants = e1000_get_variants_ich8lan, | |
.mac_ops = &ich8_mac_ops, | |
@@ -4106,10 +4727,9 @@ | |
| FLAG_HAS_WOL | |
| FLAG_HAS_CTRLEXT_ON_LOAD | |
| FLAG_HAS_AMT | |
- | FLAG_HAS_ERT | |
| FLAG_HAS_FLASH | |
| FLAG_APME_IN_WUC, | |
- .pba = 10, | |
+ .pba = 18, | |
.max_hw_frame_size = DEFAULT_JUMBO, | |
.get_variants = e1000_get_variants_ich8lan, | |
.mac_ops = &ich8_mac_ops, | |
@@ -4140,6 +4760,7 @@ | |
.mac = e1000_pch2lan, | |
.flags = FLAG_IS_ICH | |
| FLAG_HAS_WOL | |
+ | FLAG_HAS_HW_TIMESTAMP | |
| FLAG_HAS_CTRLEXT_ON_LOAD | |
| FLAG_HAS_AMT | |
| FLAG_HAS_FLASH | |
@@ -4148,7 +4769,27 @@ | |
.flags2 = FLAG2_HAS_PHY_STATS | |
| FLAG2_HAS_EEE, | |
.pba = 26, | |
- .max_hw_frame_size = DEFAULT_JUMBO, | |
+ .max_hw_frame_size = 9018, | |
+ .get_variants = e1000_get_variants_ich8lan, | |
+ .mac_ops = &ich8_mac_ops, | |
+ .phy_ops = &ich8_phy_ops, | |
+ .nvm_ops = &ich8_nvm_ops, | |
+}; | |
+ | |
+const struct e1000_info e1000_pch_lpt_info = { | |
+ .mac = e1000_pch_lpt, | |
+ .flags = FLAG_IS_ICH | |
+ | FLAG_HAS_WOL | |
+ | FLAG_HAS_HW_TIMESTAMP | |
+ | FLAG_HAS_CTRLEXT_ON_LOAD | |
+ | FLAG_HAS_AMT | |
+ | FLAG_HAS_FLASH | |
+ | FLAG_HAS_JUMBO_FRAMES | |
+ | FLAG_APME_IN_WUC, | |
+ .flags2 = FLAG2_HAS_PHY_STATS | |
+ | FLAG2_HAS_EEE, | |
+ .pba = 26, | |
+ .max_hw_frame_size = 9018, | |
.get_variants = e1000_get_variants_ich8lan, | |
.mac_ops = &ich8_mac_ops, | |
.phy_ops = &ich8_phy_ops, | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: ich8lan.h | |
Only in e1000e: lib.c | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: mac.c | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: mac.h | |
diff -ru e1000e/Makefile /home/arch/linux/drivers/net/ethernet/intel/e1000e/Makefile | |
--- e1000e/Makefile 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/Makefile 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
################################################################################ | |
# | |
# Intel PRO/1000 Linux driver | |
-# Copyright(c) 1999 - 2011 Intel Corporation. | |
+# Copyright(c) 1999 - 2013 Intel Corporation. | |
# | |
# This program is free software; you can redistribute it and/or modify it | |
# under the terms and conditions of the GNU General Public License, | |
@@ -33,5 +33,6 @@ | |
obj-$(CONFIG_E1000E) += e1000e.o | |
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ | |
- lib.o phy.o param.o ethtool.o netdev.o | |
+ mac.o manage.o nvm.o phy.o \ | |
+ param.o ethtool.o netdev.o ptp.o | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: manage.c | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: manage.h | |
diff -ru e1000e/netdev.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/netdev.c | |
--- e1000e/netdev.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/netdev.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -42,7 +42,6 @@ | |
#include <linux/slab.h> | |
#include <net/checksum.h> | |
#include <net/ip6_checksum.h> | |
-#include <linux/mii.h> | |
#include <linux/ethtool.h> | |
#include <linux/if_vlan.h> | |
#include <linux/cpu.h> | |
@@ -56,11 +55,14 @@ | |
#define DRV_EXTRAVERSION "-k" | |
-#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION | |
+#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION | |
char e1000e_driver_name[] = "e1000e"; | |
const char e1000e_driver_version[] = DRV_VERSION; | |
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); | |
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) | |
+static int debug = -1; | |
+module_param(debug, int, 0); | |
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | |
static const struct e1000_info *e1000_info_tbl[] = { | |
[board_82571] = &e1000_82571_info, | |
@@ -74,6 +76,7 @@ | |
[board_ich10lan] = &e1000_ich10_info, | |
[board_pchlan] = &e1000_pch_info, | |
[board_pch2lan] = &e1000_pch2_info, | |
+ [board_pch_lpt] = &e1000_pch_lpt_info, | |
}; | |
struct e1000_reg_info { | |
@@ -81,20 +84,7 @@ | |
char *name; | |
}; | |
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ | |
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ | |
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ | |
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ | |
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ | |
- | |
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ | |
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ | |
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ | |
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ | |
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ | |
- | |
static const struct e1000_reg_info e1000_reg_info_tbl[] = { | |
- | |
/* General Registers */ | |
{E1000_CTRL, "CTRL"}, | |
{E1000_STATUS, "STATUS"}, | |
@@ -105,14 +95,14 @@ | |
/* Rx Registers */ | |
{E1000_RCTL, "RCTL"}, | |
- {E1000_RDLEN, "RDLEN"}, | |
- {E1000_RDH, "RDH"}, | |
- {E1000_RDT, "RDT"}, | |
+ {E1000_RDLEN(0), "RDLEN"}, | |
+ {E1000_RDH(0), "RDH"}, | |
+ {E1000_RDT(0), "RDT"}, | |
{E1000_RDTR, "RDTR"}, | |
{E1000_RXDCTL(0), "RXDCTL"}, | |
{E1000_ERT, "ERT"}, | |
- {E1000_RDBAL, "RDBAL"}, | |
- {E1000_RDBAH, "RDBAH"}, | |
+ {E1000_RDBAL(0), "RDBAL"}, | |
+ {E1000_RDBAH(0), "RDBAH"}, | |
{E1000_RDFH, "RDFH"}, | |
{E1000_RDFT, "RDFT"}, | |
{E1000_RDFHS, "RDFHS"}, | |
@@ -121,11 +111,11 @@ | |
/* Tx Registers */ | |
{E1000_TCTL, "TCTL"}, | |
- {E1000_TDBAL, "TDBAL"}, | |
- {E1000_TDBAH, "TDBAH"}, | |
- {E1000_TDLEN, "TDLEN"}, | |
- {E1000_TDH, "TDH"}, | |
- {E1000_TDT, "TDT"}, | |
+ {E1000_TDBAL(0), "TDBAL"}, | |
+ {E1000_TDBAH(0), "TDBAH"}, | |
+ {E1000_TDLEN(0), "TDLEN"}, | |
+ {E1000_TDH(0), "TDH"}, | |
+ {E1000_TDT(0), "TDT"}, | |
{E1000_TIDV, "TIDV"}, | |
{E1000_TXDCTL(0), "TXDCTL"}, | |
{E1000_TADV, "TADV"}, | |
@@ -137,12 +127,14 @@ | |
{E1000_TDFPC, "TDFPC"}, | |
/* List Terminator */ | |
- {} | |
+ {0, NULL} | |
}; | |
-/* | |
+/** | |
* e1000_regdump - register printout routine | |
- */ | |
+ * @hw: pointer to the HW structure | |
+ * @reginfo: pointer to the register info table | |
+ **/ | |
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) | |
{ | |
int n = 0; | |
@@ -172,9 +164,28 @@ | |
pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); | |
} | |
-/* | |
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, | |
+ struct e1000_buffer *bi) | |
+{ | |
+ int i; | |
+ struct e1000_ps_page *ps_page; | |
+ | |
+ for (i = 0; i < adapter->rx_ps_pages; i++) { | |
+ ps_page = &bi->ps_pages[i]; | |
+ | |
+ if (ps_page->page) { | |
+ pr_info("packet dump for ps_page %d:\n", i); | |
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, | |
+ 16, 1, page_address(ps_page->page), | |
+ PAGE_SIZE, true); | |
+ } | |
+ } | |
+} | |
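The new e1000e_dump_ps_pages() helper above leans on print_hex_dump(): 16 bytes per row, 1-byte groups, each row prefixed with its address, ASCII column enabled. The same call shape in isolation, as a kernel-style sketch:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_dump_buffer(const void *buf, size_t len)
{
	/* level, prefix string, prefix type, row size, group size, data, length, ascii */
	print_hex_dump(KERN_INFO, "demo: ", DUMP_PREFIX_ADDRESS,
		       16, 1, buf, len, true);
}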
+ | |
+/** | |
* e1000e_dump - Print registers, Tx-ring and Rx-ring | |
- */ | |
+ * @adapter: board private structure | |
+ **/ | |
static void e1000e_dump(struct e1000_adapter *adapter) | |
{ | |
struct net_device *netdev = adapter->netdev; | |
@@ -183,18 +194,18 @@ | |
struct e1000_ring *tx_ring = adapter->tx_ring; | |
struct e1000_tx_desc *tx_desc; | |
struct my_u0 { | |
- u64 a; | |
- u64 b; | |
+ __le64 a; | |
+ __le64 b; | |
} *u0; | |
struct e1000_buffer *buffer_info; | |
struct e1000_ring *rx_ring = adapter->rx_ring; | |
union e1000_rx_desc_packet_split *rx_desc_ps; | |
union e1000_rx_desc_extended *rx_desc; | |
struct my_u1 { | |
- u64 a; | |
- u64 b; | |
- u64 c; | |
- u64 d; | |
+ __le64 a; | |
+ __le64 b; | |
+ __le64 c; | |
+ __le64 d; | |
} *u1; | |
u32 staterr; | |
int i = 0; | |
@@ -206,9 +217,8 @@ | |
if (netdev) { | |
dev_info(&adapter->pdev->dev, "Net device Info\n"); | |
pr_info("Device Name state trans_start last_rx\n"); | |
- pr_info("%-15s %016lX %016lX %016lX\n", | |
- netdev->name, netdev->state, netdev->trans_start, | |
- netdev->last_rx); | |
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, | |
+ netdev->state, netdev->trans_start, netdev->last_rx); | |
} | |
/* Print Registers */ | |
@@ -221,7 +231,7 @@ | |
/* Print Tx Ring Summary */ | |
if (!netdev || !netif_running(netdev)) | |
- goto exit; | |
+ return; | |
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); | |
pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); | |
@@ -293,10 +303,10 @@ | |
(unsigned long long)buffer_info->time_stamp, | |
buffer_info->skb, next_desc); | |
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) | |
+ if (netif_msg_pktdata(adapter) && buffer_info->skb) | |
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, | |
- 16, 1, phys_to_virt(buffer_info->dma), | |
- buffer_info->length, true); | |
+ 16, 1, buffer_info->skb->data, | |
+ buffer_info->skb->len, true); | |
} | |
/* Print Rx Ring Summary */ | |
@@ -308,7 +318,7 @@ | |
/* Print Rx Ring */ | |
if (!netif_msg_rx_status(adapter)) | |
- goto exit; | |
+ return; | |
dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); | |
switch (adapter->rx_ps_pages) { | |
@@ -375,10 +385,8 @@ | |
buffer_info->skb, next_desc); | |
if (netif_msg_pktdata(adapter)) | |
- print_hex_dump(KERN_INFO, "", | |
- DUMP_PREFIX_ADDRESS, 16, 1, | |
- phys_to_virt(buffer_info->dma), | |
- adapter->rx_ps_bsize0, true); | |
+ e1000e_dump_ps_pages(adapter, | |
+ buffer_info); | |
} | |
} | |
break; | |
@@ -438,20 +446,17 @@ | |
(unsigned long long)buffer_info->dma, | |
buffer_info->skb, next_desc); | |
- if (netif_msg_pktdata(adapter)) | |
+ if (netif_msg_pktdata(adapter) && | |
+ buffer_info->skb) | |
print_hex_dump(KERN_INFO, "", | |
DUMP_PREFIX_ADDRESS, 16, | |
1, | |
- phys_to_virt | |
- (buffer_info->dma), | |
+ buffer_info->skb->data, | |
adapter->rx_buffer_len, | |
true); | |
} | |
} | |
} | |
- | |
-exit: | |
- return; | |
} | |
/** | |
@@ -466,45 +471,117 @@ | |
} | |
/** | |
+ * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp | |
+ * @adapter: board private structure | |
+ * @hwtstamps: time stamp structure to update | |
+ * @systim: unsigned 64bit system time value. | |
+ * | |
+ * Convert the system time value stored in the RX/TXSTMP registers into a | |
+ * hwtstamp which can be used by the upper level time stamping functions. | |
+ * | |
+ * The 'systim_lock' spinlock is used to protect the consistency of the | |
+ * system time value. This is needed because reading the 64 bit time | |
+ * value involves reading two 32 bit registers. The first read latches the | |
+ * value. | |
+ **/ | |
+static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, | |
+ struct skb_shared_hwtstamps *hwtstamps, | |
+ u64 systim) | |
+{ | |
+ u64 ns; | |
+ unsigned long flags; | |
+ | |
+ spin_lock_irqsave(&adapter->systim_lock, flags); | |
+ ns = timecounter_cyc2time(&adapter->tc, systim); | |
+ spin_unlock_irqrestore(&adapter->systim_lock, flags); | |
+ | |
+ memset(hwtstamps, 0, sizeof(*hwtstamps)); | |
+ hwtstamps->hwtstamp = ns_to_ktime(ns); | |
+} | |
+ | |
+/** | |
+ * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp | |
+ * @adapter: board private structure | |
+ * @status: descriptor extended error and status field | |
+ * @skb: particular skb to include time stamp | |
+ * | |
+ * If the time stamp is valid, convert it into the timecounter ns value | |
+ * and store that result into the shhwtstamps structure which is passed | |
+ * up the network stack. | |
+ **/ | |
+static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, | |
+ struct sk_buff *skb) | |
+{ | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u64 rxstmp; | |
+ | |
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || | |
+ !(status & E1000_RXDEXT_STATERR_TST) || | |
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) | |
+ return; | |
+ | |
+ /* The Rx time stamp registers contain the time stamp. No other | |
+ * received packet will be time stamped until the Rx time stamp | |
+ * registers are read. Because only one packet can be time stamped | |
+ * at a time, the register values must belong to this packet and | |
+ * therefore none of the other additional attributes need to be | |
+ * compared. | |
+ */ | |
+ rxstmp = (u64)er32(RXSTMPL); | |
+ rxstmp |= (u64)er32(RXSTMPH) << 32; | |
+ e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); | |
+ | |
+ adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; | |
+} | |
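The Rx timestamp read just above depends on the register pair latching: reading RXSTMPL first locks the full 64-bit value, and RXSTMPH is read afterwards. A userspace-flavored sketch of assembling the value, with stub readers standing in for er32():

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_read_stmp_low(void)  { return 0xdeadbeefu; }	/* stand-in for er32(RXSTMPL) */
static uint32_t demo_read_stmp_high(void) { return 0x00000001u; }	/* stand-in for er32(RXSTMPH) */

int main(void)
{
	uint64_t stamp;

	stamp  = (uint64_t)demo_read_stmp_low();		/* low half first: latches the pair */
	stamp |= (uint64_t)demo_read_stmp_high() << 32;		/* then the high half */
	printf("raw timestamp = 0x%016llx\n", (unsigned long long)stamp);
	return 0;
}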
+ | |
+/** | |
* e1000_receive_skb - helper function to handle Rx indications | |
* @adapter: board private structure | |
- * @status: descriptor status field as written by hardware | |
+ * @staterr: descriptor extended error and status field as written by hardware | |
* @vlan: descriptor vlan field as written by hardware (no le/be conversion) | |
* @skb: pointer to sk_buff to be indicated to stack | |
**/ | |
static void e1000_receive_skb(struct e1000_adapter *adapter, | |
struct net_device *netdev, struct sk_buff *skb, | |
- u8 status, __le16 vlan) | |
+ u32 staterr, __le16 vlan) | |
{ | |
u16 tag = le16_to_cpu(vlan); | |
+ | |
+ e1000e_rx_hwtstamp(adapter, staterr, skb); | |
+ | |
skb->protocol = eth_type_trans(skb, netdev); | |
- if (status & E1000_RXD_STAT_VP) | |
- __vlan_hwaccel_put_tag(skb, tag); | |
+ if (staterr & E1000_RXD_STAT_VP) | |
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); | |
napi_gro_receive(&adapter->napi, skb); | |
} | |
/** | |
* e1000_rx_checksum - Receive Checksum Offload | |
- * @adapter: board private structure | |
- * @status_err: receive descriptor status and error fields | |
- * @csum: receive descriptor csum field | |
- * @sk_buff: socket buffer with received data | |
+ * @adapter: board private structure | |
+ * @status_err: receive descriptor status and error fields | |
+ * @csum: receive descriptor csum field | |
+ * @sk_buff: socket buffer with received data | |
**/ | |
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |
- u32 csum, struct sk_buff *skb) | |
+ struct sk_buff *skb) | |
{ | |
u16 status = (u16)status_err; | |
u8 errors = (u8)(status_err >> 24); | |
skb_checksum_none_assert(skb); | |
+ /* Rx checksum disabled */ | |
+ if (!(adapter->netdev->features & NETIF_F_RXCSUM)) | |
+ return; | |
+ | |
/* Ignore Checksum bit is set */ | |
if (status & E1000_RXD_STAT_IXSM) | |
return; | |
- /* TCP/UDP checksum error bit is set */ | |
- if (errors & E1000_RXD_ERR_TCPE) { | |
+ | |
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */ | |
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) { | |
/* let the stack verify checksum errors */ | |
adapter->hw_csum_err++; | |
return; | |
@@ -515,59 +592,19 @@ | |
return; | |
/* It must be a TCP or UDP packet with a valid checksum */ | |
- if (status & E1000_RXD_STAT_TCPCS) { | |
- /* TCP checksum is good */ | |
- skb->ip_summed = CHECKSUM_UNNECESSARY; | |
- } else { | |
- /* | |
- * IP fragment with UDP payload | |
- * Hardware complements the payload checksum, so we undo it | |
- * and then put the value in host order for further stack use. | |
- */ | |
- __sum16 sum = (__force __sum16)htons(csum); | |
- skb->csum = csum_unfold(~sum); | |
- skb->ip_summed = CHECKSUM_COMPLETE; | |
- } | |
+ skb->ip_summed = CHECKSUM_UNNECESSARY; | |
adapter->hw_csum_good++; | |
} | |
-/** | |
- * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() | |
- * @hw: pointer to the HW structure | |
- * @tail: address of tail descriptor register | |
- * @i: value to write to tail descriptor register | |
- * | |
- * When updating the tail register, the ME could be accessing Host CSR | |
- * registers at the same time. Normally, this is handled in h/w by an | |
- * arbiter but on some parts there is a bug that acknowledges Host accesses | |
- * later than it should which could result in the descriptor register to | |
- * have an incorrect value. Workaround this by checking the FWSM register | |
- * which has bit 24 set while ME is accessing Host CSR registers, wait | |
- * if it is set and try again a number of times. | |
- **/ | |
-static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, | |
- unsigned int i) | |
-{ | |
- unsigned int j = 0; | |
- | |
- while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && | |
- (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) | |
- udelay(50); | |
- | |
- writel(i, tail); | |
- | |
- if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) | |
- return E1000_ERR_SWFW_SYNC; | |
- | |
- return 0; | |
-} | |
- | |
-static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) | |
+static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) | |
{ | |
- u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct e1000_hw *hw = &adapter->hw; | |
+ s32 ret_val = __ew32_prepare(hw); | |
- if (e1000e_update_tail_wa(hw, tail, i)) { | |
+ writel(i, rx_ring->tail); | |
+ | |
+ if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { | |
u32 rctl = er32(RCTL); | |
ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
e_err("ME firmware caused invalid RDT - resetting\n"); | |
@@ -575,12 +612,15 @@ | |
} | |
} | |
-static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) | |
+static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) | |
{ | |
- u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct e1000_hw *hw = &adapter->hw; | |
+ s32 ret_val = __ew32_prepare(hw); | |
+ | |
+ writel(i, tx_ring->tail); | |
- if (e1000e_update_tail_wa(hw, tail, i)) { | |
+ if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { | |
u32 tctl = er32(TCTL); | |
ew32(TCTL, tctl & ~E1000_TCTL_EN); | |
e_err("ME firmware caused invalid TDT - resetting\n"); | |
@@ -590,14 +630,14 @@ | |
/** | |
* e1000_alloc_rx_buffers - Replace used receive buffers | |
- * @adapter: address of board private structure | |
+ * @rx_ring: Rx descriptor ring | |
**/ | |
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |
+static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, | |
int cleaned_count, gfp_t gfp) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct net_device *netdev = adapter->netdev; | |
struct pci_dev *pdev = adapter->pdev; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
union e1000_rx_desc_extended *rx_desc; | |
struct e1000_buffer *buffer_info; | |
struct sk_buff *skb; | |
@@ -636,17 +676,16 @@ | |
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | |
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { | |
- /* | |
- * Force memory writes to complete before letting h/w | |
+ /* Force memory writes to complete before letting h/w | |
* know there are new descriptors to fetch. (Only | |
* applicable for weak-ordered memory model archs, | |
* such as IA-64). | |
*/ | |
wmb(); | |
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
- e1000e_update_rdt_wa(adapter, i); | |
+ e1000e_update_rdt_wa(rx_ring, i); | |
else | |
- writel(i, adapter->hw.hw_addr + rx_ring->tail); | |
+ writel(i, rx_ring->tail); | |
} | |
i++; | |
if (i == rx_ring->count) | |
@@ -659,15 +698,15 @@ | |
/** | |
* e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split | |
- * @adapter: address of board private structure | |
+ * @rx_ring: Rx descriptor ring | |
**/ | |
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |
+static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, | |
int cleaned_count, gfp_t gfp) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct net_device *netdev = adapter->netdev; | |
struct pci_dev *pdev = adapter->pdev; | |
union e1000_rx_desc_packet_split *rx_desc; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
struct e1000_buffer *buffer_info; | |
struct e1000_ps_page *ps_page; | |
struct sk_buff *skb; | |
@@ -705,8 +744,7 @@ | |
goto no_buffers; | |
} | |
} | |
- /* | |
- * Refresh the desc even if buffer_addrs | |
+ /* Refresh the desc even if buffer_addrs | |
* didn't change because each write-back | |
* erases this info. | |
*/ | |
@@ -714,8 +752,7 @@ | |
cpu_to_le64(ps_page->dma); | |
} | |
- skb = __netdev_alloc_skb_ip_align(netdev, | |
- adapter->rx_ps_bsize0, | |
+ skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, | |
gfp); | |
if (!skb) { | |
@@ -739,18 +776,16 @@ | |
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); | |
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { | |
- /* | |
- * Force memory writes to complete before letting h/w | |
+ /* Force memory writes to complete before letting h/w | |
* know there are new descriptors to fetch. (Only | |
* applicable for weak-ordered memory model archs, | |
* such as IA-64). | |
*/ | |
wmb(); | |
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
- e1000e_update_rdt_wa(adapter, i << 1); | |
+ e1000e_update_rdt_wa(rx_ring, i << 1); | |
else | |
- writel(i << 1, | |
- adapter->hw.hw_addr + rx_ring->tail); | |
+ writel(i << 1, rx_ring->tail); | |
} | |
i++; | |
@@ -765,21 +800,21 @@ | |
/** | |
* e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers | |
- * @adapter: address of board private structure | |
+ * @rx_ring: Rx descriptor ring | |
* @cleaned_count: number of buffers to allocate this pass | |
**/ | |
-static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, | |
int cleaned_count, gfp_t gfp) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct net_device *netdev = adapter->netdev; | |
struct pci_dev *pdev = adapter->pdev; | |
union e1000_rx_desc_extended *rx_desc; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
struct e1000_buffer *buffer_info; | |
struct sk_buff *skb; | |
unsigned int i; | |
- unsigned int bufsz = 256 - 16 /* for skb_reserve */; | |
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */ | |
i = rx_ring->next_to_use; | |
buffer_info = &rx_ring->buffer_info[i]; | |
@@ -809,11 +844,16 @@ | |
} | |
} | |
- if (!buffer_info->dma) | |
+ if (!buffer_info->dma) { | |
buffer_info->dma = dma_map_page(&pdev->dev, | |
- buffer_info->page, 0, | |
- PAGE_SIZE, | |
+ buffer_info->page, 0, | |
+ PAGE_SIZE, | |
DMA_FROM_DEVICE); | |
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | |
+ adapter->alloc_rx_buff_failed++; | |
+ break; | |
+ } | |
+ } | |
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); | |
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | |
@@ -831,29 +871,37 @@ | |
/* Force memory writes to complete before letting h/w | |
* know there are new descriptors to fetch. (Only | |
* applicable for weak-ordered memory model archs, | |
- * such as IA-64). */ | |
+ * such as IA-64). | |
+ */ | |
wmb(); | |
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
- e1000e_update_rdt_wa(adapter, i); | |
+ e1000e_update_rdt_wa(rx_ring, i); | |
else | |
- writel(i, adapter->hw.hw_addr + rx_ring->tail); | |
+ writel(i, rx_ring->tail); | |
} | |
} | |
+static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, | |
+ struct sk_buff *skb) | |
+{ | |
+ if (netdev->features & NETIF_F_RXHASH) | |
+ skb->rxhash = le32_to_cpu(rss); | |
+} | |
+ | |
/** | |
- * e1000_clean_rx_irq - Send received data up the network stack; legacy | |
- * @adapter: board private structure | |
+ * e1000_clean_rx_irq - Send received data up the network stack | |
+ * @rx_ring: Rx descriptor ring | |
* | |
* the return value indicates whether actual cleaning was done, there | |
* is no guarantee that everything was cleaned | |
**/ | |
-static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |
- int *work_done, int work_to_do) | |
+static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, | |
+ int work_to_do) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct net_device *netdev = adapter->netdev; | |
struct pci_dev *pdev = adapter->pdev; | |
struct e1000_hw *hw = &adapter->hw; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
union e1000_rx_desc_extended *rx_desc, *next_rxd; | |
struct e1000_buffer *buffer_info, *next_buffer; | |
u32 length, staterr; | |
@@ -890,16 +938,13 @@ | |
cleaned = true; | |
cleaned_count++; | |
- dma_unmap_single(&pdev->dev, | |
- buffer_info->dma, | |
- adapter->rx_buffer_len, | |
- DMA_FROM_DEVICE); | |
+ dma_unmap_single(&pdev->dev, buffer_info->dma, | |
+ adapter->rx_buffer_len, DMA_FROM_DEVICE); | |
buffer_info->dma = 0; | |
length = le16_to_cpu(rx_desc->wb.upper.length); | |
- /* | |
- * !EOP means multiple descriptors were used to store a single | |
+ /* !EOP means multiple descriptors were used to store a single | |
* packet, if that's the case we need to toss it. In fact, we | |
* need to toss every packet with the EOP bit clear and the | |
* next frame that _does_ have the EOP bit set, as it is by | |
@@ -918,21 +963,29 @@ | |
goto next_desc; | |
} | |
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { | |
+ if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && | |
+ !(netdev->features & NETIF_F_RXALL))) { | |
/* recycle */ | |
buffer_info->skb = skb; | |
goto next_desc; | |
} | |
/* adjust length to remove Ethernet CRC */ | |
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) | |
- length -= 4; | |
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { | |
+ /* If configured to store CRC, don't subtract FCS, | |
+ * but keep the FCS bytes out of the total_rx_bytes | |
+ * counter | |
+ */ | |
+ if (netdev->features & NETIF_F_RXFCS) | |
+ total_rx_bytes -= 4; | |
+ else | |
+ length -= 4; | |
+ } | |
total_rx_bytes += length; | |
total_rx_packets++; | |
- /* | |
- * code added for copybreak, this should improve | |
+ /* code added for copybreak, this should improve | |
* performance for small packets with large amounts | |
* of reassembly being done in the stack | |
*/ | |
@@ -956,9 +1009,9 @@ | |
skb_put(skb, length); | |
/* Receive Checksum Offload */ | |
- e1000_rx_checksum(adapter, staterr, | |
- le16_to_cpu(rx_desc->wb.lower.hi_dword. | |
- csum_ip.csum), skb); | |
+ e1000_rx_checksum(adapter, staterr, skb); | |
+ | |
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); | |
e1000_receive_skb(adapter, netdev, skb, staterr, | |
rx_desc->wb.upper.vlan); | |
@@ -968,7 +1021,7 @@ | |
/* return some buffers to hardware, one at a time is too slow */ | |
if (cleaned_count >= E1000_RX_BUFFER_WRITE) { | |
- adapter->alloc_rx_buf(adapter, cleaned_count, | |
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, | |
GFP_ATOMIC); | |
cleaned_count = 0; | |
} | |
@@ -983,16 +1036,18 @@ | |
cleaned_count = e1000_desc_unused(rx_ring); | |
if (cleaned_count) | |
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); | |
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); | |
adapter->total_rx_bytes += total_rx_bytes; | |
adapter->total_rx_packets += total_rx_packets; | |
return cleaned; | |
} | |
-static void e1000_put_txbuf(struct e1000_adapter *adapter, | |
- struct e1000_buffer *buffer_info) | |
+static void e1000_put_txbuf(struct e1000_ring *tx_ring, | |
+ struct e1000_buffer *buffer_info) | |
{ | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
+ | |
if (buffer_info->dma) { | |
if (buffer_info->mapped_as_page) | |
dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, | |
@@ -1012,8 +1067,8 @@ | |
static void e1000_print_hw_hang(struct work_struct *work) | |
{ | |
struct e1000_adapter *adapter = container_of(work, | |
- struct e1000_adapter, | |
- print_hang_task); | |
+ struct e1000_adapter, | |
+ print_hang_task); | |
struct net_device *netdev = adapter->netdev; | |
struct e1000_ring *tx_ring = adapter->tx_ring; | |
unsigned int i = tx_ring->next_to_clean; | |
@@ -1026,14 +1081,19 @@ | |
if (test_bit(__E1000_DOWN, &adapter->state)) | |
return; | |
- if (!adapter->tx_hang_recheck && | |
- (adapter->flags2 & FLAG2_DMA_BURST)) { | |
+ if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { | |
/* May be block on write-back, flush and detect again | |
* flush pending descriptor writebacks to memory | |
*/ | |
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | |
/* execute the writes immediately */ | |
e1e_flush(); | |
+ /* Due to rare timing issues, write to TIDV again to ensure | |
+ * the write is successful | |
+ */ | |
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | |
+ /* execute the writes immediately */ | |
+ e1e_flush(); | |
adapter->tx_hang_recheck = true; | |
return; | |
} | |
@@ -1041,9 +1101,9 @@ | |
adapter->tx_hang_recheck = false; | |
netif_stop_queue(netdev); | |
- e1e_rphy(hw, PHY_STATUS, &phy_status); | |
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | |
- e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | |
+ e1e_rphy(hw, MII_BMSR, &phy_status); | |
+ e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); | |
+ e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); | |
pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); | |
@@ -1063,33 +1123,63 @@ | |
"PHY 1000BASE-T Status <%x>\n" | |
"PHY Extended Status <%x>\n" | |
"PCI Status <%x>\n", | |
- readl(adapter->hw.hw_addr + tx_ring->head), | |
- readl(adapter->hw.hw_addr + tx_ring->tail), | |
- tx_ring->next_to_use, | |
- tx_ring->next_to_clean, | |
- tx_ring->buffer_info[eop].time_stamp, | |
- eop, | |
- jiffies, | |
- eop_desc->upper.fields.status, | |
- er32(STATUS), | |
- phy_status, | |
- phy_1000t_status, | |
- phy_ext_status, | |
- pci_status); | |
+ readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, | |
+ tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, | |
+ eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), | |
+ phy_status, phy_1000t_status, phy_ext_status, pci_status); | |
+ | |
+ /* Suggest workaround for known h/w issue */ | |
+ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) | |
+ e_err("Try turning off Tx pause (flow control) via ethtool\n"); | |
+} | |
+ | |
+/** | |
+ * e1000e_tx_hwtstamp_work - check for Tx time stamp | |
+ * @work: pointer to work struct | |
+ * | |
+ * This work function polls the TSYNCTXCTL valid bit to determine when a | |
+ * timestamp has been taken for the current stored skb. The timestamp must | |
+ * be for this skb because only one such packet is allowed in the queue. | |
+ */ | |
+static void e1000e_tx_hwtstamp_work(struct work_struct *work) | |
+{ | |
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, | |
+ tx_hwtstamp_work); | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ | |
+ if (!adapter->tx_hwtstamp_skb) | |
+ return; | |
+ | |
+ if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { | |
+ struct skb_shared_hwtstamps shhwtstamps; | |
+ u64 txstmp; | |
+ | |
+ txstmp = er32(TXSTMPL); | |
+ txstmp |= (u64)er32(TXSTMPH) << 32; | |
+ | |
+ e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); | |
+ | |
+ skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); | |
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb); | |
+ adapter->tx_hwtstamp_skb = NULL; | |
+ } else { | |
+ /* reschedule to check later */ | |
+ schedule_work(&adapter->tx_hwtstamp_work); | |
+ } | |
} | |
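e1000e_tx_hwtstamp_work() above is a self-rescheduling work item: when the TSYNCTXCTL valid bit is not set yet, it queues itself again and retries later. A kernel-style sketch of that shape, with hypothetical demo_* names and the hardware check reduced to a flag:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct work_struct poll_work;
	bool done;			/* stand-in for the hardware valid bit */
};

static void demo_poll_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, poll_work);

	if (ctx->done)
		return;			/* condition met, nothing left to do */

	schedule_work(&ctx->poll_work);	/* not ready yet: check again later */
}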
/** | |
* e1000_clean_tx_irq - Reclaim resources after transmit completes | |
- * @adapter: board private structure | |
+ * @tx_ring: Tx descriptor ring | |
* | |
* the return value indicates whether actual cleaning was done, there | |
* is no guarantee that everything was cleaned | |
**/ | |
-static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |
+static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) | |
{ | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct net_device *netdev = adapter->netdev; | |
struct e1000_hw *hw = &adapter->hw; | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
struct e1000_tx_desc *tx_desc, *eop_desc; | |
struct e1000_buffer *buffer_info; | |
unsigned int i, eop; | |
@@ -1104,7 +1194,7 @@ | |
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && | |
(count < tx_ring->count)) { | |
bool cleaned = false; | |
- rmb(); /* read buffer_info after eop_desc */ | |
+ rmb(); /* read buffer_info after eop_desc */ | |
for (; !cleaned; count++) { | |
tx_desc = E1000_TX_DESC(*tx_ring, i); | |
buffer_info = &tx_ring->buffer_info[i]; | |
@@ -1119,7 +1209,7 @@ | |
} | |
} | |
- e1000_put_txbuf(adapter, buffer_info); | |
+ e1000_put_txbuf(tx_ring, buffer_info); | |
tx_desc->upper.data = 0; | |
i++; | |
@@ -1153,8 +1243,7 @@ | |
} | |
if (adapter->detect_tx_hung) { | |
- /* | |
- * Detect a transmit hang in hardware, this serializes the | |
+ /* Detect a transmit hang in hardware, this serializes the | |
* check with the clearing of time_stamp and movement of i | |
*/ | |
adapter->detect_tx_hung = false; | |
@@ -1173,19 +1262,19 @@ | |
/** | |
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split | |
- * @adapter: board private structure | |
+ * @rx_ring: Rx descriptor ring | |
* | |
* the return value indicates whether actual cleaning was done, there | |
* is no guarantee that everything was cleaned | |
**/ | |
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |
- int *work_done, int work_to_do) | |
+static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, | |
+ int work_to_do) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct e1000_hw *hw = &adapter->hw; | |
union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | |
struct net_device *netdev = adapter->netdev; | |
struct pci_dev *pdev = adapter->pdev; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
struct e1000_buffer *buffer_info, *next_buffer; | |
struct e1000_ps_page *ps_page; | |
struct sk_buff *skb; | |
@@ -1236,7 +1325,8 @@ | |
goto next_desc; | |
} | |
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { | |
+ if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && | |
+ !(netdev->features & NETIF_F_RXALL))) { | |
dev_kfree_skb_irq(skb); | |
goto next_desc; | |
} | |
@@ -1253,43 +1343,47 @@ | |
skb_put(skb, length); | |
{ | |
- /* | |
- * this looks ugly, but it seems compiler issues make it | |
- * more efficient than reusing j | |
- */ | |
- int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | |
- | |
- /* | |
- * page alloc/put takes too long and effects small packet | |
- * throughput, so unsplit small packets and save the alloc/put | |
- * only valid in softirq (napi) context to call kmap_* | |
- */ | |
- if (l1 && (l1 <= copybreak) && | |
- ((length + l1) <= adapter->rx_ps_bsize0)) { | |
- u8 *vaddr; | |
- | |
- ps_page = &buffer_info->ps_pages[0]; | |
- | |
- /* | |
- * there is no documentation about how to call | |
- * kmap_atomic, so we can't hold the mapping | |
- * very long | |
+ /* this looks ugly, but it seems compiler issues make | |
+ * it more efficient than reusing j | |
+ */ | |
+ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | |
+ | |
+ /* page alloc/put takes too long and effects small | |
+ * packet throughput, so unsplit small packets and | |
+ * save the alloc/put only valid in softirq (napi) | |
+ * context to call kmap_* | |
*/ | |
- dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, | |
- PAGE_SIZE, DMA_FROM_DEVICE); | |
- vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); | |
- memcpy(skb_tail_pointer(skb), vaddr, l1); | |
- kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | |
- dma_sync_single_for_device(&pdev->dev, ps_page->dma, | |
- PAGE_SIZE, DMA_FROM_DEVICE); | |
- | |
- /* remove the CRC */ | |
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) | |
- l1 -= 4; | |
- | |
- skb_put(skb, l1); | |
- goto copydone; | |
- } /* if */ | |
+ if (l1 && (l1 <= copybreak) && | |
+ ((length + l1) <= adapter->rx_ps_bsize0)) { | |
+ u8 *vaddr; | |
+ | |
+ ps_page = &buffer_info->ps_pages[0]; | |
+ | |
+ /* there is no documentation about how to call | |
+ * kmap_atomic, so we can't hold the mapping | |
+ * very long | |
+ */ | |
+ dma_sync_single_for_cpu(&pdev->dev, | |
+ ps_page->dma, | |
+ PAGE_SIZE, | |
+ DMA_FROM_DEVICE); | |
+ vaddr = kmap_atomic(ps_page->page); | |
+ memcpy(skb_tail_pointer(skb), vaddr, l1); | |
+ kunmap_atomic(vaddr); | |
+ dma_sync_single_for_device(&pdev->dev, | |
+ ps_page->dma, | |
+ PAGE_SIZE, | |
+ DMA_FROM_DEVICE); | |
+ | |
+ /* remove the CRC */ | |
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { | |
+ if (!(netdev->features & NETIF_F_RXFCS)) | |
+ l1 -= 4; | |
+ } | |
+ | |
+ skb_put(skb, l1); | |
+ goto copydone; | |
+ } /* if */ | |
} | |
for (j = 0; j < PS_PAGE_BUFFERS; j++) { | |
@@ -1311,22 +1405,25 @@ | |
/* strip the ethernet crc, problem is we're using pages now so | |
* this whole operation can get a little cpu intensive | |
*/ | |
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) | |
- pskb_trim(skb, skb->len - 4); | |
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { | |
+ if (!(netdev->features & NETIF_F_RXFCS)) | |
+ pskb_trim(skb, skb->len - 4); | |
+ } | |
copydone: | |
total_rx_bytes += skb->len; | |
total_rx_packets++; | |
- e1000_rx_checksum(adapter, staterr, le16_to_cpu( | |
- rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | |
+ e1000_rx_checksum(adapter, staterr, skb); | |
+ | |
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); | |
if (rx_desc->wb.upper.header_status & | |
- cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) | |
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) | |
adapter->rx_hdr_split++; | |
- e1000_receive_skb(adapter, netdev, skb, | |
- staterr, rx_desc->wb.middle.vlan); | |
+ e1000_receive_skb(adapter, netdev, skb, staterr, | |
+ rx_desc->wb.middle.vlan); | |
next_desc: | |
rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); | |
@@ -1334,7 +1431,7 @@ | |
/* return some buffers to hardware, one at a time is too slow */ | |
if (cleaned_count >= E1000_RX_BUFFER_WRITE) { | |
- adapter->alloc_rx_buf(adapter, cleaned_count, | |
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, | |
GFP_ATOMIC); | |
cleaned_count = 0; | |
} | |
@@ -1349,7 +1446,7 @@ | |
cleaned_count = e1000_desc_unused(rx_ring); | |
if (cleaned_count) | |
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); | |
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); | |
adapter->total_rx_bytes += total_rx_bytes; | |
adapter->total_rx_packets += total_rx_packets; | |
@@ -1360,7 +1457,7 @@ | |
* e1000_consume_page - helper function | |
**/ | |
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, | |
- u16 length) | |
+ u16 length) | |
{ | |
bi->page = NULL; | |
skb->len += length; | |
@@ -1375,20 +1472,20 @@ | |
* the return value indicates whether actual cleaning was done, there | |
* is no guarantee that everything was cleaned | |
**/ | |
- | |
-static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | |
- int *work_done, int work_to_do) | |
+static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, | |
+ int work_to_do) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct net_device *netdev = adapter->netdev; | |
struct pci_dev *pdev = adapter->pdev; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
union e1000_rx_desc_extended *rx_desc, *next_rxd; | |
struct e1000_buffer *buffer_info, *next_buffer; | |
u32 length, staterr; | |
unsigned int i; | |
int cleaned_count = 0; | |
bool cleaned = false; | |
- unsigned int total_rx_bytes=0, total_rx_packets=0; | |
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0; | |
+ struct skb_shared_info *shinfo; | |
i = rx_ring->next_to_clean; | |
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); | |
@@ -1424,7 +1521,8 @@ | |
/* errors is only valid for DD + EOP descriptors */ | |
if (unlikely((staterr & E1000_RXD_STAT_EOP) && | |
- (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) { | |
+ ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && | |
+ !(netdev->features & NETIF_F_RXALL)))) { | |
/* recycle both page and skb */ | |
buffer_info->skb = skb; | |
/* an error means any chain goes out the window too */ | |
@@ -1433,7 +1531,6 @@ | |
rx_ring->rx_skb_top = NULL; | |
goto next_desc; | |
} | |
- | |
#define rxtop (rx_ring->rx_skb_top) | |
if (!(staterr & E1000_RXD_STAT_EOP)) { | |
/* this descriptor is only the beginning (or middle) */ | |
@@ -1441,12 +1538,13 @@ | |
/* this is the beginning of a chain */ | |
rxtop = skb; | |
skb_fill_page_desc(rxtop, 0, buffer_info->page, | |
- 0, length); | |
+ 0, length); | |
} else { | |
/* this is the middle of a chain */ | |
- skb_fill_page_desc(rxtop, | |
- skb_shinfo(rxtop)->nr_frags, | |
- buffer_info->page, 0, length); | |
+ shinfo = skb_shinfo(rxtop); | |
+ skb_fill_page_desc(rxtop, shinfo->nr_frags, | |
+ buffer_info->page, 0, | |
+ length); | |
/* re-use the skb, only consumed the page */ | |
buffer_info->skb = skb; | |
} | |
@@ -1455,44 +1553,46 @@ | |
} else { | |
if (rxtop) { | |
/* end of the chain */ | |
- skb_fill_page_desc(rxtop, | |
- skb_shinfo(rxtop)->nr_frags, | |
- buffer_info->page, 0, length); | |
+ shinfo = skb_shinfo(rxtop); | |
+ skb_fill_page_desc(rxtop, shinfo->nr_frags, | |
+ buffer_info->page, 0, | |
+ length); | |
/* re-use the current skb, we only consumed the | |
- * page */ | |
+ * page | |
+ */ | |
buffer_info->skb = skb; | |
skb = rxtop; | |
rxtop = NULL; | |
e1000_consume_page(buffer_info, skb, length); | |
} else { | |
/* no chain, got EOP, this buf is the packet | |
- * copybreak to save the put_page/alloc_page */ | |
+ * copybreak to save the put_page/alloc_page | |
+ */ | |
if (length <= copybreak && | |
skb_tailroom(skb) >= length) { | |
u8 *vaddr; | |
- vaddr = kmap_atomic(buffer_info->page, | |
- KM_SKB_DATA_SOFTIRQ); | |
+ vaddr = kmap_atomic(buffer_info->page); | |
memcpy(skb_tail_pointer(skb), vaddr, | |
length); | |
- kunmap_atomic(vaddr, | |
- KM_SKB_DATA_SOFTIRQ); | |
+ kunmap_atomic(vaddr); | |
/* re-use the page, so don't erase | |
- * buffer_info->page */ | |
+ * buffer_info->page | |
+ */ | |
skb_put(skb, length); | |
} else { | |
skb_fill_page_desc(skb, 0, | |
- buffer_info->page, 0, | |
- length); | |
+ buffer_info->page, 0, | |
+ length); | |
e1000_consume_page(buffer_info, skb, | |
- length); | |
+ length); | |
} | |
} | |
} | |
- /* Receive Checksum Offload XXX recompute due to CRC strip? */ | |
- e1000_rx_checksum(adapter, staterr, | |
- le16_to_cpu(rx_desc->wb.lower.hi_dword. | |
- csum_ip.csum), skb); | |
+ /* Receive Checksum Offload */ | |
+ e1000_rx_checksum(adapter, staterr, skb); | |
+ | |
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); | |
/* probably a little skewed due to removing CRC */ | |
total_rx_bytes += skb->len; | |
@@ -1513,7 +1613,7 @@ | |
/* return some buffers to hardware, one at a time is too slow */ | |
if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { | |
- adapter->alloc_rx_buf(adapter, cleaned_count, | |
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, | |
GFP_ATOMIC); | |
cleaned_count = 0; | |
} | |
@@ -1528,7 +1628,7 @@ | |
cleaned_count = e1000_desc_unused(rx_ring); | |
if (cleaned_count) | |
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); | |
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); | |
adapter->total_rx_bytes += total_rx_bytes; | |
adapter->total_rx_packets += total_rx_packets; | |
@@ -1537,11 +1637,11 @@ | |
/** | |
* e1000_clean_rx_ring - Free Rx Buffers per Queue | |
- * @adapter: board private structure | |
+ * @rx_ring: Rx descriptor ring | |
**/ | |
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |
+static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) | |
{ | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct e1000_buffer *buffer_info; | |
struct e1000_ps_page *ps_page; | |
struct pci_dev *pdev = adapter->pdev; | |
@@ -1557,8 +1657,7 @@ | |
DMA_FROM_DEVICE); | |
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) | |
dma_unmap_page(&pdev->dev, buffer_info->dma, | |
- PAGE_SIZE, | |
- DMA_FROM_DEVICE); | |
+ PAGE_SIZE, DMA_FROM_DEVICE); | |
else if (adapter->clean_rx == e1000_clean_rx_irq_ps) | |
dma_unmap_single(&pdev->dev, buffer_info->dma, | |
adapter->rx_ps_bsize0, | |
@@ -1601,14 +1700,18 @@ | |
rx_ring->next_to_use = 0; | |
adapter->flags2 &= ~FLAG2_IS_DISCARDING; | |
- writel(0, adapter->hw.hw_addr + rx_ring->head); | |
- writel(0, adapter->hw.hw_addr + rx_ring->tail); | |
+ writel(0, rx_ring->head); | |
+ if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
+ e1000e_update_rdt_wa(rx_ring, 0); | |
+ else | |
+ writel(0, rx_ring->tail); | |
} | |
static void e1000e_downshift_workaround(struct work_struct *work) | |
{ | |
struct e1000_adapter *adapter = container_of(work, | |
- struct e1000_adapter, downshift_task); | |
+ struct e1000_adapter, | |
+ downshift_task); | |
if (test_bit(__E1000_DOWN, &adapter->state)) | |
return; | |
@@ -1621,29 +1724,24 @@ | |
* @irq: interrupt number | |
* @data: pointer to a network interface device structure | |
**/ | |
-static irqreturn_t e1000_intr_msi(int irq, void *data) | |
+static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_hw *hw = &adapter->hw; | |
u32 icr = er32(ICR); | |
- /* | |
- * read ICR disables interrupts using IAM | |
- */ | |
- | |
+ /* read ICR disables interrupts using IAM */ | |
if (icr & E1000_ICR_LSC) { | |
- hw->mac.get_link_status = 1; | |
- /* | |
- * ICH8 workaround-- Call gig speed drop workaround on cable | |
+ hw->mac.get_link_status = true; | |
+ /* ICH8 workaround-- Call gig speed drop workaround on cable | |
* disconnect (LSC) before accessing any PHY registers | |
*/ | |
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | |
(!(er32(STATUS) & E1000_STATUS_LU))) | |
schedule_work(&adapter->downshift_task); | |
- /* | |
- * 80003ES2LAN workaround-- For packet buffer work-around on | |
+ /* 80003ES2LAN workaround-- For packet buffer work-around on | |
* link down event; disable receives here in the ISR and reset | |
* adapter in watchdog | |
*/ | |
@@ -1652,13 +1750,30 @@ | |
/* disable receives */ | |
u32 rctl = er32(RCTL); | |
ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
- adapter->flags |= FLAG_RX_RESTART_NOW; | |
+ adapter->flags |= FLAG_RESTART_NOW; | |
} | |
/* guard against interrupt when we're going down */ | |
if (!test_bit(__E1000_DOWN, &adapter->state)) | |
mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
} | |
+ /* Reset on uncorrectable ECC error */ | |
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { | |
+ u32 pbeccsts = er32(PBECCSTS); | |
+ | |
+ adapter->corr_errors += | |
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; | |
+ adapter->uncorr_errors += | |
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> | |
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; | |
+ | |
+ /* Do the reset outside of interrupt context */ | |
+ schedule_work(&adapter->reset_task); | |
+ | |
+ /* return immediately since reset is imminent */ | |
+ return IRQ_HANDLED; | |
+ } | |
+ | |
if (napi_schedule_prep(&adapter->napi)) { | |
adapter->total_tx_bytes = 0; | |
adapter->total_tx_packets = 0; | |
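
Both interrupt handlers gain the same PBECCSTS accounting on pch_lpt parts: corrected and uncorrected packet-buffer ECC counts are accumulated and an adapter reset is scheduled outside interrupt context when an uncorrectable error shows up. The two counts sit in adjacent byte fields of the register; a small stand-alone check of that extraction, with the mask/shift values assumed to mirror the driver's E1000_PBECCSTS_* definitions (low byte corrected, next byte uncorrected):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, mirroring E1000_PBECCSTS_*: corrected count in bits 7:0,
 * uncorrected count in bits 15:8.
 */
#define PBECCSTS_CORR_ERR_CNT_MASK    0x000000FFu
#define PBECCSTS_UNCORR_ERR_CNT_MASK  0x0000FF00u
#define PBECCSTS_UNCORR_ERR_CNT_SHIFT 8

int main(void)
{
	uint32_t pbeccsts = 0x00000302;	/* example register snapshot */
	uint32_t corr = pbeccsts & PBECCSTS_CORR_ERR_CNT_MASK;
	uint32_t uncorr = (pbeccsts & PBECCSTS_UNCORR_ERR_CNT_MASK) >>
			  PBECCSTS_UNCORR_ERR_CNT_SHIFT;

	printf("corrected=%u uncorrected=%u\n", corr, uncorr);	/* 2 and 3 */
	return 0;
}
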
@@ -1675,7 +1790,7 @@ | |
* @irq: interrupt number | |
* @data: pointer to a network interface device structure | |
**/ | |
-static irqreturn_t e1000_intr(int irq, void *data) | |
+static irqreturn_t e1000_intr(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -1683,33 +1798,29 @@ | |
u32 rctl, icr = er32(ICR); | |
if (!icr || test_bit(__E1000_DOWN, &adapter->state)) | |
- return IRQ_NONE; /* Not our interrupt */ | |
+ return IRQ_NONE; /* Not our interrupt */ | |
- /* | |
- * IMS will not auto-mask if INT_ASSERTED is not set, and if it is | |
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | |
* not set, then the adapter didn't send an interrupt | |
*/ | |
if (!(icr & E1000_ICR_INT_ASSERTED)) | |
return IRQ_NONE; | |
- /* | |
- * Interrupt Auto-Mask...upon reading ICR, | |
+ /* Interrupt Auto-Mask...upon reading ICR, | |
* interrupts are masked. No need for the | |
* IMC write | |
*/ | |
if (icr & E1000_ICR_LSC) { | |
- hw->mac.get_link_status = 1; | |
- /* | |
- * ICH8 workaround-- Call gig speed drop workaround on cable | |
+ hw->mac.get_link_status = true; | |
+ /* ICH8 workaround-- Call gig speed drop workaround on cable | |
* disconnect (LSC) before accessing any PHY registers | |
*/ | |
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | |
(!(er32(STATUS) & E1000_STATUS_LU))) | |
schedule_work(&adapter->downshift_task); | |
- /* | |
- * 80003ES2LAN workaround-- | |
+ /* 80003ES2LAN workaround-- | |
* For packet buffer work-around on link down event; | |
* disable receives here in the ISR and | |
* reset adapter in watchdog | |
@@ -1719,13 +1830,30 @@ | |
/* disable receives */ | |
rctl = er32(RCTL); | |
ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
- adapter->flags |= FLAG_RX_RESTART_NOW; | |
+ adapter->flags |= FLAG_RESTART_NOW; | |
} | |
/* guard against interrupt when we're going down */ | |
if (!test_bit(__E1000_DOWN, &adapter->state)) | |
mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
} | |
+ /* Reset on uncorrectable ECC error */ | |
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { | |
+ u32 pbeccsts = er32(PBECCSTS); | |
+ | |
+ adapter->corr_errors += | |
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; | |
+ adapter->uncorr_errors += | |
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> | |
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; | |
+ | |
+ /* Do the reset outside of interrupt context */ | |
+ schedule_work(&adapter->reset_task); | |
+ | |
+ /* return immediately since reset is imminent */ | |
+ return IRQ_HANDLED; | |
+ } | |
+ | |
if (napi_schedule_prep(&adapter->napi)) { | |
adapter->total_tx_bytes = 0; | |
adapter->total_tx_packets = 0; | |
@@ -1737,7 +1865,7 @@ | |
return IRQ_HANDLED; | |
} | |
-static irqreturn_t e1000_msix_other(int irq, void *data) | |
+static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -1756,7 +1884,7 @@ | |
if (icr & E1000_ICR_OTHER) { | |
if (!(icr & E1000_ICR_LSC)) | |
goto no_link_interrupt; | |
- hw->mac.get_link_status = 1; | |
+ hw->mac.get_link_status = true; | |
/* guard against interrupt when we're going down */ | |
if (!test_bit(__E1000_DOWN, &adapter->state)) | |
mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
@@ -1769,37 +1897,36 @@ | |
return IRQ_HANDLED; | |
} | |
- | |
-static irqreturn_t e1000_intr_msix_tx(int irq, void *data) | |
+static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_hw *hw = &adapter->hw; | |
struct e1000_ring *tx_ring = adapter->tx_ring; | |
- | |
adapter->total_tx_bytes = 0; | |
adapter->total_tx_packets = 0; | |
- if (!e1000_clean_tx_irq(adapter)) | |
+ if (!e1000_clean_tx_irq(tx_ring)) | |
/* Ring was not completely cleaned, so fire another interrupt */ | |
ew32(ICS, tx_ring->ims_val); | |
return IRQ_HANDLED; | |
} | |
-static irqreturn_t e1000_intr_msix_rx(int irq, void *data) | |
+static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct e1000_ring *rx_ring = adapter->rx_ring; | |
/* Write the ITR value calculated at the end of the | |
* previous interrupt. | |
*/ | |
- if (adapter->rx_ring->set_itr) { | |
- writel(1000000000 / (adapter->rx_ring->itr_val * 256), | |
- adapter->hw.hw_addr + adapter->rx_ring->itr_register); | |
- adapter->rx_ring->set_itr = 0; | |
+ if (rx_ring->set_itr) { | |
+ writel(1000000000 / (rx_ring->itr_val * 256), | |
+ rx_ring->itr_register); | |
+ rx_ring->set_itr = 0; | |
} | |
if (napi_schedule_prep(&adapter->napi)) { | |
@@ -1833,15 +1960,14 @@ | |
ew32(RFCTL, rfctl); | |
} | |
-#define E1000_IVAR_INT_ALLOC_VALID 0x8 | |
/* Configure Rx vector */ | |
rx_ring->ims_val = E1000_IMS_RXQ0; | |
adapter->eiac_mask |= rx_ring->ims_val; | |
if (rx_ring->itr_val) | |
writel(1000000000 / (rx_ring->itr_val * 256), | |
- hw->hw_addr + rx_ring->itr_register); | |
+ rx_ring->itr_register); | |
else | |
- writel(1, hw->hw_addr + rx_ring->itr_register); | |
+ writel(1, rx_ring->itr_register); | |
ivar = E1000_IVAR_INT_ALLOC_VALID | vector; | |
/* Configure Tx vector */ | |
@@ -1849,9 +1975,9 @@ | |
vector++; | |
if (tx_ring->itr_val) | |
writel(1000000000 / (tx_ring->itr_val * 256), | |
- hw->hw_addr + tx_ring->itr_register); | |
+ tx_ring->itr_register); | |
else | |
- writel(1, hw->hw_addr + tx_ring->itr_register); | |
+ writel(1, tx_ring->itr_register); | |
adapter->eiac_mask |= tx_ring->ims_val; | |
ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); | |
@@ -1874,7 +2000,6 @@ | |
ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; | |
/* Auto-Mask Other interrupts upon ICR read */ | |
-#define E1000_EIAC_MASK_82574 0x01F00000 | |
ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); | |
ctrl_ext |= E1000_CTRL_EXT_EIAME; | |
ew32(CTRL_EXT, ctrl_ext); | |
@@ -1909,8 +2034,9 @@ | |
if (adapter->flags & FLAG_HAS_MSIX) { | |
adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ | |
adapter->msix_entries = kcalloc(adapter->num_vectors, | |
- sizeof(struct msix_entry), | |
- GFP_KERNEL); | |
+ sizeof(struct | |
+ msix_entry), | |
+ GFP_KERNEL); | |
if (adapter->msix_entries) { | |
for (i = 0; i < adapter->num_vectors; i++) | |
adapter->msix_entries[i].entry = i; | |
@@ -1965,8 +2091,9 @@ | |
e1000_intr_msix_rx, 0, adapter->rx_ring->name, | |
netdev); | |
if (err) | |
- goto out; | |
- adapter->rx_ring->itr_register = E1000_EITR_82574(vector); | |
+ return err; | |
+ adapter->rx_ring->itr_register = adapter->hw.hw_addr + | |
+ E1000_EITR_82574(vector); | |
adapter->rx_ring->itr_val = adapter->itr; | |
vector++; | |
@@ -1980,20 +2107,20 @@ | |
e1000_intr_msix_tx, 0, adapter->tx_ring->name, | |
netdev); | |
if (err) | |
- goto out; | |
- adapter->tx_ring->itr_register = E1000_EITR_82574(vector); | |
+ return err; | |
+ adapter->tx_ring->itr_register = adapter->hw.hw_addr + | |
+ E1000_EITR_82574(vector); | |
adapter->tx_ring->itr_val = adapter->itr; | |
vector++; | |
err = request_irq(adapter->msix_entries[vector].vector, | |
e1000_msix_other, 0, netdev->name, netdev); | |
if (err) | |
- goto out; | |
+ return err; | |
e1000_configure_msix(adapter); | |
+ | |
return 0; | |
-out: | |
- return err; | |
} | |
/** | |
@@ -2087,6 +2214,8 @@ | |
if (adapter->msix_entries) { | |
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); | |
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); | |
+ } else if (hw->mac.type == e1000_pch_lpt) { | |
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); | |
} else { | |
ew32(IMS, IMS_ENABLE_MASK); | |
} | |
@@ -2145,7 +2274,7 @@ | |
} | |
/** | |
- * @e1000_alloc_ring - allocate memory for a ring structure | |
+ * e1000_alloc_ring_dma - allocate memory for a ring structure | |
**/ | |
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, | |
struct e1000_ring *ring) | |
@@ -2162,13 +2291,13 @@ | |
/** | |
* e1000e_setup_tx_resources - allocate Tx resources (Descriptors) | |
- * @adapter: board private structure | |
+ * @tx_ring: Tx descriptor ring | |
* | |
* Return 0 on success, negative on failure | |
**/ | |
-int e1000e_setup_tx_resources(struct e1000_adapter *adapter) | |
+int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) | |
{ | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
int err = -ENOMEM, size; | |
size = sizeof(struct e1000_buffer) * tx_ring->count; | |
@@ -2196,13 +2325,13 @@ | |
/** | |
* e1000e_setup_rx_resources - allocate Rx resources (Descriptors) | |
- * @adapter: board private structure | |
+ * @rx_ring: Rx descriptor ring | |
* | |
* Returns 0 on success, negative on failure | |
**/ | |
-int e1000e_setup_rx_resources(struct e1000_adapter *adapter) | |
+int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) | |
{ | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct e1000_buffer *buffer_info; | |
int i, size, desc_len, err = -ENOMEM; | |
@@ -2249,18 +2378,18 @@ | |
/** | |
* e1000_clean_tx_ring - Free Tx Buffers | |
- * @adapter: board private structure | |
+ * @tx_ring: Tx descriptor ring | |
**/ | |
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter) | |
+static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) | |
{ | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct e1000_buffer *buffer_info; | |
unsigned long size; | |
unsigned int i; | |
for (i = 0; i < tx_ring->count; i++) { | |
buffer_info = &tx_ring->buffer_info[i]; | |
- e1000_put_txbuf(adapter, buffer_info); | |
+ e1000_put_txbuf(tx_ring, buffer_info); | |
} | |
netdev_reset_queue(adapter->netdev); | |
@@ -2272,22 +2401,25 @@ | |
tx_ring->next_to_use = 0; | |
tx_ring->next_to_clean = 0; | |
- writel(0, adapter->hw.hw_addr + tx_ring->head); | |
- writel(0, adapter->hw.hw_addr + tx_ring->tail); | |
+ writel(0, tx_ring->head); | |
+ if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
+ e1000e_update_tdt_wa(tx_ring, 0); | |
+ else | |
+ writel(0, tx_ring->tail); | |
} | |
/** | |
* e1000e_free_tx_resources - Free Tx Resources per Queue | |
- * @adapter: board private structure | |
+ * @tx_ring: Tx descriptor ring | |
* | |
* Free all transmit software resources | |
**/ | |
-void e1000e_free_tx_resources(struct e1000_adapter *adapter) | |
+void e1000e_free_tx_resources(struct e1000_ring *tx_ring) | |
{ | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct pci_dev *pdev = adapter->pdev; | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
- e1000_clean_tx_ring(adapter); | |
+ e1000_clean_tx_ring(tx_ring); | |
vfree(tx_ring->buffer_info); | |
tx_ring->buffer_info = NULL; | |
@@ -2299,18 +2431,17 @@ | |
/** | |
* e1000e_free_rx_resources - Free Rx Resources | |
- * @adapter: board private structure | |
+ * @rx_ring: Rx descriptor ring | |
* | |
* Free all receive software resources | |
**/ | |
- | |
-void e1000e_free_rx_resources(struct e1000_adapter *adapter) | |
+void e1000e_free_rx_resources(struct e1000_ring *rx_ring) | |
{ | |
+ struct e1000_adapter *adapter = rx_ring->adapter; | |
struct pci_dev *pdev = adapter->pdev; | |
- struct e1000_ring *rx_ring = adapter->rx_ring; | |
int i; | |
- e1000_clean_rx_ring(adapter); | |
+ e1000_clean_rx_ring(rx_ring); | |
for (i = 0; i < rx_ring->count; i++) | |
kfree(rx_ring->buffer_info[i].ps_pages); | |
@@ -2339,39 +2470,37 @@ | |
* while increasing bulk throughput. This functionality is controlled | |
* by the InterruptThrottleRate module parameter. | |
**/ | |
-static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | |
- u16 itr_setting, int packets, | |
- int bytes) | |
+static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) | |
{ | |
unsigned int retval = itr_setting; | |
if (packets == 0) | |
- goto update_itr_done; | |
+ return itr_setting; | |
switch (itr_setting) { | |
case lowest_latency: | |
/* handle TSO and jumbo frames */ | |
- if (bytes/packets > 8000) | |
+ if (bytes / packets > 8000) | |
retval = bulk_latency; | |
else if ((packets < 5) && (bytes > 512)) | |
retval = low_latency; | |
break; | |
- case low_latency: /* 50 usec aka 20000 ints/s */ | |
+ case low_latency: /* 50 usec aka 20000 ints/s */ | |
if (bytes > 10000) { | |
/* this if handles the TSO accounting */ | |
- if (bytes/packets > 8000) | |
+ if (bytes / packets > 8000) | |
retval = bulk_latency; | |
- else if ((packets < 10) || ((bytes/packets) > 1200)) | |
+ else if ((packets < 10) || ((bytes / packets) > 1200)) | |
retval = bulk_latency; | |
else if ((packets > 35)) | |
retval = lowest_latency; | |
- } else if (bytes/packets > 2000) { | |
+ } else if (bytes / packets > 2000) { | |
retval = bulk_latency; | |
} else if (packets <= 2 && bytes < 512) { | |
retval = lowest_latency; | |
} | |
break; | |
- case bulk_latency: /* 250 usec aka 4000 ints/s */ | |
+ case bulk_latency: /* 250 usec aka 4000 ints/s */ | |
if (bytes > 25000) { | |
if (packets > 35) | |
retval = low_latency; | |
@@ -2381,13 +2510,11 @@ | |
break; | |
} | |
-update_itr_done: | |
return retval; | |
} | |
static void e1000_set_itr(struct e1000_adapter *adapter) | |
{ | |
- struct e1000_hw *hw = &adapter->hw; | |
u16 current_itr; | |
u32 new_itr = adapter->itr; | |
@@ -2403,31 +2530,29 @@ | |
goto set_itr_now; | |
} | |
- adapter->tx_itr = e1000_update_itr(adapter, | |
- adapter->tx_itr, | |
- adapter->total_tx_packets, | |
- adapter->total_tx_bytes); | |
+ adapter->tx_itr = e1000_update_itr(adapter->tx_itr, | |
+ adapter->total_tx_packets, | |
+ adapter->total_tx_bytes); | |
/* conservative mode (itr 3) eliminates the lowest_latency setting */ | |
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | |
adapter->tx_itr = low_latency; | |
- adapter->rx_itr = e1000_update_itr(adapter, | |
- adapter->rx_itr, | |
- adapter->total_rx_packets, | |
- adapter->total_rx_bytes); | |
+ adapter->rx_itr = e1000_update_itr(adapter->rx_itr, | |
+ adapter->total_rx_packets, | |
+ adapter->total_rx_bytes); | |
/* conservative mode (itr 3) eliminates the lowest_latency setting */ | |
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | |
adapter->rx_itr = low_latency; | |
current_itr = max(adapter->rx_itr, adapter->tx_itr); | |
- switch (current_itr) { | |
/* counts and packets in update_itr are dependent on these numbers */ | |
+ switch (current_itr) { | |
case lowest_latency: | |
new_itr = 70000; | |
break; | |
case low_latency: | |
- new_itr = 20000; /* aka hwitr = ~200 */ | |
+ new_itr = 20000; /* aka hwitr = ~200 */ | |
break; | |
case bulk_latency: | |
new_itr = 4000; | |
@@ -2438,23 +2563,42 @@ | |
set_itr_now: | |
if (new_itr != adapter->itr) { | |
- /* | |
- * this attempts to bias the interrupt rate towards Bulk | |
+ /* this attempts to bias the interrupt rate towards Bulk | |
* by adding intermediate steps when interrupt rate is | |
* increasing | |
*/ | |
new_itr = new_itr > adapter->itr ? | |
- min(adapter->itr + (new_itr >> 2), new_itr) : | |
- new_itr; | |
+ min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; | |
adapter->itr = new_itr; | |
adapter->rx_ring->itr_val = new_itr; | |
if (adapter->msix_entries) | |
adapter->rx_ring->set_itr = 1; | |
else | |
- if (new_itr) | |
- ew32(ITR, 1000000000 / (new_itr * 256)); | |
- else | |
- ew32(ITR, 0); | |
+ e1000e_write_itr(adapter, new_itr); | |
+ } | |
+} | |
+ | |
+/** | |
+ * e1000e_write_itr - write the ITR value to the appropriate registers | |
+ * @adapter: address of board private structure | |
+ * @itr: new ITR value to program | |
+ * | |
+ * e1000e_write_itr determines if the adapter is in MSI-X mode | |
+ * and, if so, writes the EITR registers with the ITR value. | |
+ * Otherwise, it writes the ITR value into the ITR register. | |
+ **/ | |
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) | |
+{ | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u32 new_itr = itr ? 1000000000 / (itr * 256) : 0; | |
+ | |
+ if (adapter->msix_entries) { | |
+ int vector; | |
+ | |
+ for (vector = 0; vector < adapter->num_vectors; vector++) | |
+ writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); | |
+ } else { | |
+ ew32(ITR, new_itr); | |
} | |
} | |
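
The ITR/EITR value programmed here is not an interrupt rate: e1000_set_itr() and the new e1000e_write_itr() both convert a target of itr interrupts per second into register units of 256 ns, i.e. 1000000000 / (itr * 256), and write 0 to disable throttling entirely. A quick self-contained check of that conversion (helper names are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* ITR/EITR count time between interrupts in 256 ns units, so a target
 * interrupt rate and the register value convert as below.
 */
static uint32_t itr_reg_from_rate(uint32_t ints_per_sec)
{
	return ints_per_sec ? 1000000000u / (ints_per_sec * 256u) : 0;
}

static uint32_t rate_from_itr_reg(uint32_t reg)
{
	return reg ? 1000000000u / (reg * 256u) : 0;
}

int main(void)
{
	uint32_t rates[] = { 70000, 20000, 4000 };	/* lowest/low/bulk latency */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		uint32_t reg = itr_reg_from_rate(rates[i]);

		printf("%u ints/s -> reg %u -> ~%u ints/s\n",
		       rates[i], reg, rate_from_itr_reg(reg));
	}
	return 0;
}

In MSI-X mode the same value now lands in every EITR register rather than the single ITR register, which is why e1000e_write_itr() loops over num_vectors.
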
@@ -2462,15 +2606,21 @@ | |
* e1000_alloc_queues - Allocate memory for all rings | |
* @adapter: board private structure to initialize | |
**/ | |
-static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) | |
+static int e1000_alloc_queues(struct e1000_adapter *adapter) | |
{ | |
- adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | |
+ int size = sizeof(struct e1000_ring); | |
+ | |
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL); | |
if (!adapter->tx_ring) | |
goto err; | |
+ adapter->tx_ring->count = adapter->tx_ring_count; | |
+ adapter->tx_ring->adapter = adapter; | |
- adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | |
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL); | |
if (!adapter->rx_ring) | |
goto err; | |
+ adapter->rx_ring->count = adapter->rx_ring_count; | |
+ adapter->rx_ring->adapter = adapter; | |
return 0; | |
err: | |
@@ -2481,33 +2631,31 @@ | |
} | |
/** | |
- * e1000_clean - NAPI Rx polling callback | |
+ * e1000e_poll - NAPI Rx polling callback | |
* @napi: struct associated with this polling callback | |
- * @budget: amount of packets driver is allowed to process this poll | |
+ * @weight: number of packets driver is allowed to process this poll | |
**/ | |
-static int e1000_clean(struct napi_struct *napi, int budget) | |
+static int e1000e_poll(struct napi_struct *napi, int weight) | |
{ | |
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | |
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, | |
+ napi); | |
struct e1000_hw *hw = &adapter->hw; | |
struct net_device *poll_dev = adapter->netdev; | |
int tx_cleaned = 1, work_done = 0; | |
adapter = netdev_priv(poll_dev); | |
- if (adapter->msix_entries && | |
- !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) | |
- goto clean_rx; | |
- | |
- tx_cleaned = e1000_clean_tx_irq(adapter); | |
+ if (!adapter->msix_entries || | |
+ (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) | |
+ tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); | |
-clean_rx: | |
- adapter->clean_rx(adapter, &work_done, budget); | |
+ adapter->clean_rx(adapter->rx_ring, &work_done, weight); | |
if (!tx_cleaned) | |
- work_done = budget; | |
+ work_done = weight; | |
- /* If budget not fully consumed, exit the polling mode */ | |
- if (work_done < budget) { | |
+ /* If weight not fully consumed, exit the polling mode */ | |
+ if (work_done < weight) { | |
if (adapter->itr_setting & 3) | |
e1000_set_itr(adapter); | |
napi_complete(napi); | |
@@ -2522,7 +2670,8 @@ | |
return work_done; | |
} | |
-static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, | |
+ __always_unused __be16 proto, u16 vid) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_hw *hw = &adapter->hw; | |
@@ -2547,7 +2696,8 @@ | |
return 0; | |
} | |
-static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, | |
+ __always_unused __be16 proto, u16 vid) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_hw *hw = &adapter->hw; | |
@@ -2591,7 +2741,8 @@ | |
ew32(RCTL, rctl); | |
if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { | |
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | |
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), | |
+ adapter->mng_vlan_id); | |
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
} | |
} | |
@@ -2651,24 +2802,23 @@ | |
u16 vid = adapter->hw.mng_cookie.vlan_id; | |
u16 old_vid = adapter->mng_vlan_id; | |
- if (adapter->hw.mng_cookie.status & | |
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { | |
- e1000_vlan_rx_add_vid(netdev, vid); | |
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { | |
+ e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); | |
adapter->mng_vlan_id = vid; | |
} | |
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) | |
- e1000_vlan_rx_kill_vid(netdev, old_vid); | |
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); | |
} | |
static void e1000_restore_vlan(struct e1000_adapter *adapter) | |
{ | |
u16 vid; | |
- e1000_vlan_rx_add_vid(adapter->netdev, 0); | |
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); | |
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) | |
- e1000_vlan_rx_add_vid(adapter->netdev, vid); | |
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); | |
} | |
static void e1000_init_manageability_pt(struct e1000_adapter *adapter) | |
@@ -2681,8 +2831,7 @@ | |
manc = er32(MANC); | |
- /* | |
- * enable receiving management packets to the host. this will probably | |
+ /* enable receiving management packets to the host. this will probably | |
* generate destination unreachable messages from the host OS, but | |
* the packets will be handled on SMBUS | |
*/ | |
@@ -2695,8 +2844,7 @@ | |
break; | |
case e1000_82574: | |
case e1000_82583: | |
- /* | |
- * Check if IPMI pass-through decision filter already exists; | |
+ /* Check if IPMI pass-through decision filter already exists; | |
* if so, enable it. | |
*/ | |
for (i = 0, j = 0; i < 8; i++) { | |
@@ -2746,31 +2894,18 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
struct e1000_ring *tx_ring = adapter->tx_ring; | |
u64 tdba; | |
- u32 tdlen, tctl, tipg, tarc; | |
- u32 ipgr1, ipgr2; | |
+ u32 tdlen, tarc; | |
/* Setup the HW Tx Head and Tail descriptor pointers */ | |
tdba = tx_ring->dma; | |
tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); | |
- ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); | |
- ew32(TDBAH, (tdba >> 32)); | |
- ew32(TDLEN, tdlen); | |
- ew32(TDH, 0); | |
- ew32(TDT, 0); | |
- tx_ring->head = E1000_TDH; | |
- tx_ring->tail = E1000_TDT; | |
- | |
- /* Set the default values for the Tx Inter Packet Gap timer */ | |
- tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ | |
- ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ | |
- ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ | |
- | |
- if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) | |
- ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ | |
- | |
- tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; | |
- tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; | |
- ew32(TIPG, tipg); | |
+ ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); | |
+ ew32(TDBAH(0), (tdba >> 32)); | |
+ ew32(TDLEN(0), tdlen); | |
+ ew32(TDH(0), 0); | |
+ ew32(TDT(0), 0); | |
+ tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); | |
+ tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); | |
/* Set the Tx Interrupt Delay register */ | |
ew32(TIDV, adapter->tx_int_delay); | |
@@ -2781,11 +2916,10 @@ | |
u32 txdctl = er32(TXDCTL(0)); | |
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | | |
E1000_TXDCTL_WTHRESH); | |
- /* | |
- * set up some performance related parameters to encourage the | |
+ /* set up some performance related parameters to encourage the | |
* hardware to use the bus more efficiently in bursts, depends | |
* on the tx_int_delay to be enabled, | |
- * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time | |
+ * wthresh = 1 ==> burst write is disabled to avoid Tx stalls | |
* hthresh = 1 ==> prefetch when one or more available | |
* pthresh = 0x1f ==> prefetch if internal cache 31 or less | |
* BEWARE: this seems to work but should be considered first if | |
@@ -2793,20 +2927,13 @@ | |
*/ | |
txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; | |
ew32(TXDCTL(0), txdctl); | |
- /* erratum work around: set txdctl the same for both queues */ | |
- ew32(TXDCTL(1), txdctl); | |
} | |
- | |
- /* Program the Transmit Control Register */ | |
- tctl = er32(TCTL); | |
- tctl &= ~E1000_TCTL_CT; | |
- tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | |
- (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | |
+ /* erratum work around: set txdctl the same for both queues */ | |
+ ew32(TXDCTL(1), er32(TXDCTL(0))); | |
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { | |
tarc = er32(TARC(0)); | |
- /* | |
- * set the speed mode bit, we'll clear it if we're not at | |
+ /* set the speed mode bit, we'll clear it if we're not at | |
* gigabit link later | |
*/ | |
#define SPEED_MODE_BIT (1 << 21) | |
@@ -2834,9 +2961,7 @@ | |
/* enable Report Status bit */ | |
adapter->txd_cmd |= E1000_TXD_CMD_RS; | |
- ew32(TCTL, tctl); | |
- | |
- e1000e_config_collision_dist(hw); | |
+ hw->mac.ops.config_collision_dist(hw); | |
} | |
/** | |
@@ -2851,25 +2976,18 @@ | |
u32 rctl, rfctl; | |
u32 pages = 0; | |
- /* Workaround Si errata on 82579 - configure jumbo frame flow */ | |
- if (hw->mac.type == e1000_pch2lan) { | |
- s32 ret_val; | |
- | |
- if (adapter->netdev->mtu > ETH_DATA_LEN) | |
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); | |
- else | |
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); | |
- | |
- if (ret_val) | |
- e_dbg("failed to enable jumbo frame workaround mode\n"); | |
- } | |
+ /* Workaround Si errata on PCHx - configure jumbo frame flow */ | |
+ if ((hw->mac.type >= e1000_pch2lan) && | |
+ (adapter->netdev->mtu > ETH_DATA_LEN) && | |
+ e1000_lv_jumbo_workaround_ich8lan(hw, true)) | |
+ e_dbg("failed to enable jumbo frame workaround mode\n"); | |
/* Program MC offset vector base */ | |
rctl = er32(RCTL); | |
rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | |
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | | |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | |
/* Do not Store bad packets */ | |
rctl &= ~E1000_RCTL_SBP; | |
@@ -2927,9 +3045,9 @@ | |
/* Enable Extended Status in all Receive Descriptors */ | |
rfctl = er32(RFCTL); | |
rfctl |= E1000_RFCTL_EXTEN; | |
+ ew32(RFCTL, rfctl); | |
- /* | |
- * 82571 and greater support packet-split where the protocol | |
+ /* 82571 and greater support packet-split where the protocol | |
* header is placed in skb->data and the packet data is | |
* placed in pages hanging off of skb_shinfo(skb)->nr_frags. | |
* In the case of a non-split, skb->data is linearly filled, | |
@@ -2944,8 +3062,7 @@ | |
* per packet. | |
*/ | |
pages = PAGE_USE_COUNT(adapter->netdev->mtu); | |
- if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && | |
- (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) | |
+ if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) | |
adapter->rx_ps_pages = pages; | |
else | |
adapter->rx_ps_pages = 0; | |
@@ -2953,39 +3070,46 @@ | |
if (adapter->rx_ps_pages) { | |
u32 psrctl = 0; | |
- /* | |
- * disable packet split support for IPv6 extension headers, | |
- * because some malformed IPv6 headers can hang the Rx | |
- */ | |
- rfctl |= (E1000_RFCTL_IPV6_EX_DIS | | |
- E1000_RFCTL_NEW_IPV6_EXT_DIS); | |
- | |
/* Enable Packet split descriptors */ | |
rctl |= E1000_RCTL_DTYP_PS; | |
- psrctl |= adapter->rx_ps_bsize0 >> | |
- E1000_PSRCTL_BSIZE0_SHIFT; | |
+ psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; | |
switch (adapter->rx_ps_pages) { | |
case 3: | |
- psrctl |= PAGE_SIZE << | |
- E1000_PSRCTL_BSIZE3_SHIFT; | |
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; | |
+ /* fall-through */ | |
case 2: | |
- psrctl |= PAGE_SIZE << | |
- E1000_PSRCTL_BSIZE2_SHIFT; | |
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; | |
+ /* fall-through */ | |
case 1: | |
- psrctl |= PAGE_SIZE >> | |
- E1000_PSRCTL_BSIZE1_SHIFT; | |
+ psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; | |
break; | |
} | |
ew32(PSRCTL, psrctl); | |
} | |
- ew32(RFCTL, rfctl); | |
+ /* This is useful for sniffing bad packets. */ | |
+ if (adapter->netdev->features & NETIF_F_RXALL) { | |
+ /* UPE and MPE will be handled by normal PROMISC logic | |
+ * in e1000e_set_rx_mode | |
+ */ | |
+ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ | |
+ E1000_RCTL_BAM | /* RX All Bcast Pkts */ | |
+ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ | |
+ | |
+ rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ | |
+ E1000_RCTL_DPF | /* Allow filtered pause */ | |
+ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ | |
+ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, | |
+ * and that breaks VLANs. | |
+ */ | |
+ } | |
+ | |
ew32(RCTL, rctl); | |
/* just started the receive unit, no need to restart */ | |
- adapter->flags &= ~FLAG_RX_RESTART_NOW; | |
+ adapter->flags &= ~FLAG_RESTART_NOW; | |
} | |
/** | |
@@ -3025,8 +3149,7 @@ | |
usleep_range(10000, 20000); | |
if (adapter->flags2 & FLAG2_DMA_BURST) { | |
- /* | |
- * set the writeback threshold (only takes effect if the RDTR | |
+ /* set the writeback threshold (only takes effect if the RDTR | |
* is set). set GRAN=1 and write back up to 0x4 worth, and | |
* enable prefetching of 0x20 Rx descriptors | |
* granularity = 01 | |
@@ -3037,8 +3160,7 @@ | |
ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); | |
ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); | |
- /* | |
- * override the delay timers for enabling bursting, only if | |
+ /* override the delay timers for enabling bursting, only if | |
* the value was not set by the user via module options | |
*/ | |
if (adapter->rx_int_delay == DEFAULT_RDTR) | |
@@ -3053,7 +3175,7 @@ | |
/* irq moderation */ | |
ew32(RADV, adapter->rx_abs_int_delay); | |
if ((adapter->itr_setting != 0) && (adapter->itr != 0)) | |
- ew32(ITR, 1000000000 / (adapter->itr * 256)); | |
+ e1000e_write_itr(adapter, adapter->itr); | |
ctrl_ext = er32(CTRL_EXT); | |
/* Auto-Mask interrupts upon ICR access */ | |
@@ -3062,58 +3184,43 @@ | |
ew32(CTRL_EXT, ctrl_ext); | |
e1e_flush(); | |
- /* | |
- * Setup the HW Rx Head and Tail Descriptor Pointers and | |
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and | |
* the Base and Length of the Rx Descriptor Ring | |
*/ | |
rdba = rx_ring->dma; | |
- ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); | |
- ew32(RDBAH, (rdba >> 32)); | |
- ew32(RDLEN, rdlen); | |
- ew32(RDH, 0); | |
- ew32(RDT, 0); | |
- rx_ring->head = E1000_RDH; | |
- rx_ring->tail = E1000_RDT; | |
+ ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); | |
+ ew32(RDBAH(0), (rdba >> 32)); | |
+ ew32(RDLEN(0), rdlen); | |
+ ew32(RDH(0), 0); | |
+ ew32(RDT(0), 0); | |
+ rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); | |
+ rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); | |
/* Enable Receive Checksum Offload for TCP and UDP */ | |
rxcsum = er32(RXCSUM); | |
- if (adapter->netdev->features & NETIF_F_RXCSUM) { | |
+ if (adapter->netdev->features & NETIF_F_RXCSUM) | |
rxcsum |= E1000_RXCSUM_TUOFL; | |
- | |
- /* | |
- * IPv4 payload checksum for UDP fragments must be | |
- * used in conjunction with packet-split. | |
- */ | |
- if (adapter->rx_ps_pages) | |
- rxcsum |= E1000_RXCSUM_IPPCSE; | |
- } else { | |
+ else | |
rxcsum &= ~E1000_RXCSUM_TUOFL; | |
- /* no need to clear IPPCSE as it defaults to 0 */ | |
- } | |
ew32(RXCSUM, rxcsum); | |
- /* | |
- * Enable early receives on supported devices, only takes effect when | |
- * packet size is equal or larger than the specified value (in 8 byte | |
- * units), e.g. using jumbo frames when setting to E1000_ERT_2048 | |
+ /* With jumbo frames, excessive C-state transition latencies result | |
+ * in dropped transactions. | |
*/ | |
- if ((adapter->flags & FLAG_HAS_ERT) || | |
- (adapter->hw.mac.type == e1000_pch2lan)) { | |
- if (adapter->netdev->mtu > ETH_DATA_LEN) { | |
+ if (adapter->netdev->mtu > ETH_DATA_LEN) { | |
+ u32 lat = | |
+ ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - | |
+ adapter->max_frame_size) * 8 / 1000; | |
+ | |
+ if (adapter->flags & FLAG_IS_ICH) { | |
u32 rxdctl = er32(RXDCTL(0)); | |
ew32(RXDCTL(0), rxdctl | 0x3); | |
- if (adapter->flags & FLAG_HAS_ERT) | |
- ew32(ERT, E1000_ERT_2048 | (1 << 13)); | |
- /* | |
- * With jumbo frames and early-receive enabled, | |
- * excessive C-state transition latencies result in | |
- * dropped transactions. | |
- */ | |
- pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); | |
- } else { | |
- pm_qos_update_request(&adapter->netdev->pm_qos_req, | |
- PM_QOS_DEFAULT_VALUE); | |
} | |
+ | |
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, lat); | |
+ } else { | |
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, | |
+ PM_QOS_DEFAULT_VALUE); | |
} | |
/* Enable Receives */ | |
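
The early-receive (ERT) path is gone; in its place the v3.14 code computes how long the Rx packet buffer can absorb line-rate traffic with jumbo frames and feeds that bound to pm_qos_update_request() to cap C-state exit latency. Reading the arithmetic: (Rx allocation in KB * 1024 - max_frame_size) bytes, times 8 for bits, divided by 1000, which at 1 Gb/s (1 bit per ns) yields microseconds, the unit PM QoS expects. A worked example under those assumptions:

#include <stdint.h>
#include <stdio.h>

/* Latency budget in usec, assuming 1 Gb/s line rate: how long the Rx
 * packet buffer can soak up traffic while the CPU sits in a deep C-state.
 * rxa_kb stands in for the PBA register's Rx-allocation field.
 */
static uint32_t rx_latency_budget_us(uint32_t rxa_kb, uint32_t max_frame)
{
	return (rxa_kb * 1024 - max_frame) * 8 / 1000;
}

int main(void)
{
	/* e.g. 20 KB of Rx buffer and a 9018-byte jumbo frame -> 91 us */
	printf("budget: %u us\n", rx_latency_budget_us(20, 9018));
	return 0;
}
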
@@ -3150,7 +3257,7 @@ | |
/* update_mc_addr_list expects a packed array of only addresses. */ | |
i = 0; | |
netdev_for_each_mc_addr(ha, netdev) | |
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); | |
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); | |
hw->mac.ops.update_mc_addr_list(hw, mta_list, i); | |
kfree(mta_list); | |
@@ -3188,14 +3295,13 @@ | |
if (!netdev_uc_empty(netdev) && rar_entries) { | |
struct netdev_hw_addr *ha; | |
- /* | |
- * write the addresses in reverse order to avoid write | |
+ /* write the addresses in reverse order to avoid write | |
* combining | |
*/ | |
netdev_for_each_uc_addr(ha, netdev) { | |
if (!rar_entries) | |
break; | |
- e1000e_rar_set(hw, ha->addr, rar_entries--); | |
+ hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); | |
count++; | |
} | |
} | |
@@ -3237,11 +3343,11 @@ | |
e1000e_vlan_filter_disable(adapter); | |
} else { | |
int count; | |
+ | |
if (netdev->flags & IFF_ALLMULTI) { | |
rctl |= E1000_RCTL_MPE; | |
} else { | |
- /* | |
- * Write addresses to the MTA, if the attempt fails | |
+ /* Write addresses to the MTA, if the attempt fails | |
* then we should just turn on promiscuous mode so | |
* that we can at least receive multicast traffic | |
*/ | |
@@ -3250,8 +3356,7 @@ | |
rctl |= E1000_RCTL_MPE; | |
} | |
e1000e_vlan_filter_enable(adapter); | |
- /* | |
- * Write addresses to available RAR registers, if there is not | |
+ /* Write addresses to available RAR registers, if there is not | |
* sufficient space to store all the addresses then enable | |
* unicast promiscuous mode | |
*/ | |
@@ -3260,12 +3365,284 @@ | |
rctl |= E1000_RCTL_UPE; | |
} | |
- ew32(RCTL, rctl); | |
+ ew32(RCTL, rctl); | |
+ | |
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) | |
+ e1000e_vlan_strip_enable(adapter); | |
+ else | |
+ e1000e_vlan_strip_disable(adapter); | |
+} | |
+ | |
+static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) | |
+{ | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u32 mrqc, rxcsum; | |
+ int i; | |
+ static const u32 rsskey[10] = { | |
+ 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, | |
+ 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe | |
+ }; | |
+ | |
+ /* Fill out hash function seed */ | |
+ for (i = 0; i < 10; i++) | |
+ ew32(RSSRK(i), rsskey[i]); | |
+ | |
+ /* Direct all traffic to queue 0 */ | |
+ for (i = 0; i < 32; i++) | |
+ ew32(RETA(i), 0); | |
+ | |
+ /* Disable raw packet checksumming so that RSS hash is placed in | |
+ * descriptor on writeback. | |
+ */ | |
+ rxcsum = er32(RXCSUM); | |
+ rxcsum |= E1000_RXCSUM_PCSD; | |
+ | |
+ ew32(RXCSUM, rxcsum); | |
+ | |
+ mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | | |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP | | |
+ E1000_MRQC_RSS_FIELD_IPV6 | | |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP | | |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); | |
+ | |
+ ew32(MRQC, mrqc); | |
+} | |
+ | |
+/** | |
+ * e1000e_get_base_timinca - get default SYSTIM time increment attributes | |
+ * @adapter: board private structure | |
+ * @timinca: pointer to returned time increment attributes | |
+ * | |
+ * Get attributes for incrementing the System Time Register SYSTIML/H at | |
+ * the default base frequency, and set the cyclecounter shift value. | |
+ **/ | |
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) | |
+{ | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u32 incvalue, incperiod, shift; | |
+ | |
+ /* Make sure clock is enabled on I217 before checking the frequency */ | |
+ if ((hw->mac.type == e1000_pch_lpt) && | |
+ !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && | |
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { | |
+ u32 fextnvm7 = er32(FEXTNVM7); | |
+ | |
+ if (!(fextnvm7 & (1 << 0))) { | |
+ ew32(FEXTNVM7, fextnvm7 | (1 << 0)); | |
+ e1e_flush(); | |
+ } | |
+ } | |
+ | |
+ switch (hw->mac.type) { | |
+ case e1000_pch2lan: | |
+ case e1000_pch_lpt: | |
+ /* On I217, the clock frequency is 25MHz or 96MHz as | |
+ * indicated by the System Clock Frequency Indication | |
+ */ | |
+ if ((hw->mac.type != e1000_pch_lpt) || | |
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { | |
+ /* Stable 96MHz frequency */ | |
+ incperiod = INCPERIOD_96MHz; | |
+ incvalue = INCVALUE_96MHz; | |
+ shift = INCVALUE_SHIFT_96MHz; | |
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; | |
+ break; | |
+ } | |
+ /* fall-through */ | |
+ case e1000_82574: | |
+ case e1000_82583: | |
+ /* Stable 25MHz frequency */ | |
+ incperiod = INCPERIOD_25MHz; | |
+ incvalue = INCVALUE_25MHz; | |
+ shift = INCVALUE_SHIFT_25MHz; | |
+ adapter->cc.shift = shift; | |
+ break; | |
+ default: | |
+ return -EINVAL; | |
+ } | |
+ | |
+ *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | | |
+ ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); | |
+ | |
+ return 0; | |
+} | |
+ | |
+/** | |
+ * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable | |
+ * @adapter: board private structure | |
+ * | |
+ * Outgoing time stamping can be enabled and disabled. Play nice and | |
+ * disable it when requested, although it shouldn't cause any overhead | |
+ * when no packet needs it. At most one packet in the queue may be | |
+ * marked for time stamping, otherwise it would be impossible to tell | |
+ * for sure to which packet the hardware time stamp belongs. | |
+ * | |
+ * Incoming time stamping has to be configured via the hardware filters. | |
+ * Not all combinations are supported, in particular event type has to be | |
+ * specified. Matching the kind of event packet is not supported, with the | |
+ * exception of "all V2 events regardless of level 2 or 4". | |
+ **/ | |
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, | |
+ struct hwtstamp_config *config) | |
+{ | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; | |
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; | |
+ u32 rxmtrl = 0; | |
+ u16 rxudp = 0; | |
+ bool is_l4 = false; | |
+ bool is_l2 = false; | |
+ u32 regval; | |
+ s32 ret_val; | |
+ | |
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) | |
+ return -EINVAL; | |
+ | |
+ /* flags reserved for future extensions - must be zero */ | |
+ if (config->flags) | |
+ return -EINVAL; | |
+ | |
+ switch (config->tx_type) { | |
+ case HWTSTAMP_TX_OFF: | |
+ tsync_tx_ctl = 0; | |
+ break; | |
+ case HWTSTAMP_TX_ON: | |
+ break; | |
+ default: | |
+ return -ERANGE; | |
+ } | |
+ | |
+ switch (config->rx_filter) { | |
+ case HWTSTAMP_FILTER_NONE: | |
+ tsync_rx_ctl = 0; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; | |
+ rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; | |
+ is_l4 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; | |
+ rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; | |
+ is_l4 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
+ /* Also time stamps V2 L2 Path Delay Request/Response */ | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; | |
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; | |
+ is_l2 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
+ /* Also time stamps V2 L2 Path Delay Request/Response. */ | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; | |
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; | |
+ is_l2 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
+ /* Hardware cannot filter just V2 L4 Sync messages; | |
+ * fall-through to V2 (both L2 and L4) Sync. | |
+ */ | |
+ case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
+ /* Also time stamps V2 Path Delay Request/Response. */ | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; | |
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; | |
+ is_l2 = true; | |
+ is_l4 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
+ /* Hardware cannot filter just V2 L4 Delay Request messages; | |
+ * fall-through to V2 (both L2 and L4) Delay Request. | |
+ */ | |
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
+ /* Also time stamps V2 Path Delay Request/Response. */ | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; | |
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; | |
+ is_l2 = true; | |
+ is_l4 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
+ /* Hardware cannot filter just V2 L4 or L2 Event messages; | |
+ * fall-through to all V2 (both L2 and L4) Events. | |
+ */ | |
+ case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; | |
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; | |
+ is_l2 = true; | |
+ is_l4 = true; | |
+ break; | |
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
+ /* For V1, the hardware can only filter Sync messages or | |
+ * Delay Request messages but not both so fall-through to | |
+ * time stamp all packets. | |
+ */ | |
+ case HWTSTAMP_FILTER_ALL: | |
+ is_l2 = true; | |
+ is_l4 = true; | |
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; | |
+ config->rx_filter = HWTSTAMP_FILTER_ALL; | |
+ break; | |
+ default: | |
+ return -ERANGE; | |
+ } | |
+ | |
+ adapter->hwtstamp_config = *config; | |
+ | |
+ /* enable/disable Tx h/w time stamping */ | |
+ regval = er32(TSYNCTXCTL); | |
+ regval &= ~E1000_TSYNCTXCTL_ENABLED; | |
+ regval |= tsync_tx_ctl; | |
+ ew32(TSYNCTXCTL, regval); | |
+ if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != | |
+ (regval & E1000_TSYNCTXCTL_ENABLED)) { | |
+ e_err("Timesync Tx Control register not set as expected\n"); | |
+ return -EAGAIN; | |
+ } | |
+ | |
+ /* enable/disable Rx h/w time stamping */ | |
+ regval = er32(TSYNCRXCTL); | |
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); | |
+ regval |= tsync_rx_ctl; | |
+ ew32(TSYNCRXCTL, regval); | |
+ if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | | |
+ E1000_TSYNCRXCTL_TYPE_MASK)) != | |
+ (regval & (E1000_TSYNCRXCTL_ENABLED | | |
+ E1000_TSYNCRXCTL_TYPE_MASK))) { | |
+ e_err("Timesync Rx Control register not set as expected\n"); | |
+ return -EAGAIN; | |
+ } | |
+ | |
+ /* L2: define ethertype filter for time stamped packets */ | |
+ if (is_l2) | |
+ rxmtrl |= ETH_P_1588; | |
+ | |
+ /* define which PTP packets get time stamped */ | |
+ ew32(RXMTRL, rxmtrl); | |
+ | |
+ /* Filter by destination port */ | |
+ if (is_l4) { | |
+ rxudp = PTP_EV_PORT; | |
+ cpu_to_be16s(&rxudp); | |
+ } | |
+ ew32(RXUDP, rxudp); | |
+ | |
+ e1e_flush(); | |
- if (netdev->features & NETIF_F_HW_VLAN_RX) | |
- e1000e_vlan_strip_enable(adapter); | |
- else | |
- e1000e_vlan_strip_disable(adapter); | |
+ /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ | |
+ er32(RXSTMPH); | |
+ er32(TXSTMPH); | |
+ | |
+ /* Get and set the System Time Register SYSTIM base frequency */ | |
+ ret_val = e1000e_get_base_timinca(adapter, ®val); | |
+ if (ret_val) | |
+ return ret_val; | |
+ ew32(TIMINCA, regval); | |
+ | |
+ /* reset the ns time counter */ | |
+ timecounter_init(&adapter->tc, &adapter->cc, | |
+ ktime_to_ns(ktime_get_real())); | |
+ | |
+ return 0; | |
} | |
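
e1000e_config_hwtstamp() is driven from userspace through the SIOCSHWTSTAMP ioctl (wired up to the driver's ioctl handler elsewhere in this diff). A minimal sketch of the caller side; the interface name is a placeholder and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		/* the driver may adjust the filter (e.g. to FILTER_ALL) and
		 * the configuration it actually used should be copied back
		 */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}

This matches the filter table above: asking for a V2 L4-only filter, for instance, is widened by the driver to the combined L2/L4 V2 setting before the TSYNCRXCTL type field is programmed.
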
/** | |
@@ -3274,16 +3651,20 @@ | |
**/ | |
static void e1000_configure(struct e1000_adapter *adapter) | |
{ | |
+ struct e1000_ring *rx_ring = adapter->rx_ring; | |
+ | |
e1000e_set_rx_mode(adapter->netdev); | |
e1000_restore_vlan(adapter); | |
e1000_init_manageability_pt(adapter); | |
e1000_configure_tx(adapter); | |
+ | |
+ if (adapter->netdev->features & NETIF_F_RXHASH) | |
+ e1000e_setup_rss_hash(adapter); | |
e1000_setup_rctl(adapter); | |
e1000_configure_rx(adapter); | |
- adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), | |
- GFP_KERNEL); | |
+ adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); | |
} | |
/** | |
@@ -3339,8 +3720,7 @@ | |
ew32(PBA, pba); | |
if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { | |
- /* | |
- * To maintain wire speed transmits, the Tx FIFO should be | |
+ /* To maintain wire speed transmits, the Tx FIFO should be | |
* large enough to accommodate two full transmit packets, | |
* rounded up to the next 1KB and expressed in KB. Likewise, | |
* the Rx FIFO should be large enough to accommodate at least | |
@@ -3352,13 +3732,11 @@ | |
tx_space = pba >> 16; | |
/* lower 16 bits has Rx packet buffer allocation size in KB */ | |
pba &= 0xffff; | |
- /* | |
- * the Tx fifo also stores 16 bytes of information about the Tx | |
+ /* the Tx fifo also stores 16 bytes of information about the Tx | |
* but don't include ethernet FCS because hardware appends it | |
*/ | |
min_tx_space = (adapter->max_frame_size + | |
- sizeof(struct e1000_tx_desc) - | |
- ETH_FCS_LEN) * 2; | |
+ sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; | |
min_tx_space = ALIGN(min_tx_space, 1024); | |
min_tx_space >>= 10; | |
/* software strips receive CRC, so leave room for it */ | |
@@ -3366,8 +3744,7 @@ | |
min_rx_space = ALIGN(min_rx_space, 1024); | |
min_rx_space >>= 10; | |
- /* | |
- * If current Tx allocation is less than the min Tx FIFO size, | |
+ /* If current Tx allocation is less than the min Tx FIFO size, | |
* and the min Tx FIFO size is less than the current Rx FIFO | |
* allocation, take space away from current Rx allocation | |
*/ | |
@@ -3375,103 +3752,112 @@ | |
((min_tx_space - tx_space) < pba)) { | |
pba -= min_tx_space - tx_space; | |
- /* | |
- * if short on Rx space, Rx wins and must trump Tx | |
- * adjustment or use Early Receive if available | |
+ /* if short on Rx space, Rx wins and must trump Tx | |
+ * adjustment | |
*/ | |
- if ((pba < min_rx_space) && | |
- (!(adapter->flags & FLAG_HAS_ERT))) | |
- /* ERT enabled in e1000_configure_rx */ | |
+ if (pba < min_rx_space) | |
pba = min_rx_space; | |
} | |
ew32(PBA, pba); | |
} | |
- /* | |
- * flow control settings | |
+ /* flow control settings | |
* | |
* The high water mark must be low enough to fit one full frame | |
* (or the size used for early receive) above it in the Rx FIFO. | |
* Set it to the lower of: | |
* - 90% of the Rx FIFO size, and | |
- * - the full Rx FIFO size minus the early receive size (for parts | |
- * with ERT support assuming ERT set to E1000_ERT_2048), or | |
* - the full Rx FIFO size minus one full frame | |
*/ | |
if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) | |
fc->pause_time = 0xFFFF; | |
else | |
fc->pause_time = E1000_FC_PAUSE_TIME; | |
- fc->send_xon = 1; | |
+ fc->send_xon = true; | |
fc->current_mode = fc->requested_mode; | |
switch (hw->mac.type) { | |
+ case e1000_ich9lan: | |
+ case e1000_ich10lan: | |
+ if (adapter->netdev->mtu > ETH_DATA_LEN) { | |
+ pba = 14; | |
+ ew32(PBA, pba); | |
+ fc->high_water = 0x2800; | |
+ fc->low_water = fc->high_water - 8; | |
+ break; | |
+ } | |
+ /* fall-through */ | |
default: | |
- if ((adapter->flags & FLAG_HAS_ERT) && | |
- (adapter->netdev->mtu > ETH_DATA_LEN)) | |
- hwm = min(((pba << 10) * 9 / 10), | |
- ((pba << 10) - (E1000_ERT_2048 << 3))); | |
- else | |
- hwm = min(((pba << 10) * 9 / 10), | |
- ((pba << 10) - adapter->max_frame_size)); | |
+ hwm = min(((pba << 10) * 9 / 10), | |
+ ((pba << 10) - adapter->max_frame_size)); | |
- fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ | |
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ | |
fc->low_water = fc->high_water - 8; | |
break; | |
case e1000_pchlan: | |
- /* | |
- * Workaround PCH LOM adapter hangs with certain network | |
+ /* Workaround PCH LOM adapter hangs with certain network | |
* loads. If hangs persist, try disabling Tx flow control. | |
*/ | |
if (adapter->netdev->mtu > ETH_DATA_LEN) { | |
fc->high_water = 0x3500; | |
- fc->low_water = 0x1500; | |
+ fc->low_water = 0x1500; | |
} else { | |
fc->high_water = 0x5000; | |
- fc->low_water = 0x3000; | |
+ fc->low_water = 0x3000; | |
} | |
fc->refresh_time = 0x1000; | |
break; | |
case e1000_pch2lan: | |
- fc->high_water = 0x05C20; | |
- fc->low_water = 0x05048; | |
- fc->pause_time = 0x0650; | |
+ case e1000_pch_lpt: | |
fc->refresh_time = 0x0400; | |
- if (adapter->netdev->mtu > ETH_DATA_LEN) { | |
- pba = 14; | |
- ew32(PBA, pba); | |
+ | |
+ if (adapter->netdev->mtu <= ETH_DATA_LEN) { | |
+ fc->high_water = 0x05C20; | |
+ fc->low_water = 0x05048; | |
+ fc->pause_time = 0x0650; | |
+ break; | |
} | |
+ | |
+ pba = 14; | |
+ ew32(PBA, pba); | |
+ fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; | |
+ fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; | |
break; | |
} | |
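The default branch of the switch above picks the flow-control high-water mark as the lower of 90% of the Rx packet buffer and the buffer size minus one full frame, masked to the register's 8-byte granularity, with the low-water mark 8 bytes below it. A standalone worked example, assuming a 20 KB Rx allocation, a 1522-byte maximum frame, and the usual 0xFFF8 value for the FCRTH_RTH mask:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the default-case watermark math above. FCRTH_RTH is
 * assumed to be 0x0000FFF8 (8-byte granularity); pba and max_frame are
 * illustrative values. */
#define FCRTH_RTH 0x0000FFF8u

int main(void)
{
        uint32_t pba = 20;              /* KB of Rx packet buffer */
        uint32_t max_frame = 1522;      /* bytes */
        uint32_t fifo = pba << 10;      /* bytes */

        uint32_t hwm = fifo * 9 / 10;   /* 90% of the Rx FIFO */
        if (fifo - max_frame < hwm)
                hwm = fifo - max_frame; /* or FIFO minus one full frame */

        uint32_t high_water = hwm & FCRTH_RTH;
        uint32_t low_water = high_water - 8;

        printf("high_water=0x%x low_water=0x%x\n",
               (unsigned)high_water, (unsigned)low_water);
        return 0;
}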
- /* | |
- * Disable Adaptive Interrupt Moderation if 2 full packets cannot | |
- * fit in receive buffer and early-receive not supported. | |
+ /* Alignment of Tx data is on an arbitrary byte boundary with the | |
+ * maximum size per Tx descriptor limited only to the transmit | |
+ * allocation of the packet buffer minus 96 bytes with an upper | |
+ * limit of 24KB due to receive synchronization limitations. | |
+ */ | |
+ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, | |
+ 24 << 10); | |
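The new tx_fifo_limit above caps how many bytes a single Tx descriptor may carry: the Tx packet-buffer allocation taken from the upper 16 bits of PBA (in KB), minus 96 bytes of per-packet overhead, clamped to 24 KB. A short sketch of that arithmetic using a made-up PBA value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pba = 0x00140026;      /* hypothetical PBA: Tx = 20 KB */
        uint32_t tx_kb = pba >> 16;     /* upper 16 bits: Tx allocation in KB */
        uint32_t limit = (tx_kb << 10) - 96;

        if (limit > (24u << 10))
                limit = 24u << 10;      /* 24 KB cap from Rx synchronization */

        printf("tx_fifo_limit = %u bytes\n", (unsigned)limit);  /* 20384 */
        return 0;
}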
+ | |
+ /* Disable Adaptive Interrupt Moderation if 2 full packets cannot | |
+ * fit in receive buffer. | |
*/ | |
if (adapter->itr_setting & 0x3) { | |
- if (((adapter->max_frame_size * 2) > (pba << 10)) && | |
- !(adapter->flags & FLAG_HAS_ERT)) { | |
+ if ((adapter->max_frame_size * 2) > (pba << 10)) { | |
if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { | |
dev_info(&adapter->pdev->dev, | |
- "Interrupt Throttle Rate turned off\n"); | |
+ "Interrupt Throttle Rate off\n"); | |
adapter->flags2 |= FLAG2_DISABLE_AIM; | |
- ew32(ITR, 0); | |
+ e1000e_write_itr(adapter, 0); | |
} | |
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) { | |
dev_info(&adapter->pdev->dev, | |
- "Interrupt Throttle Rate turned on\n"); | |
+ "Interrupt Throttle Rate on\n"); | |
adapter->flags2 &= ~FLAG2_DISABLE_AIM; | |
adapter->itr = 20000; | |
- ew32(ITR, 1000000000 / (adapter->itr * 256)); | |
+ e1000e_write_itr(adapter, adapter->itr); | |
} | |
} | |
/* Allow time for pending master requests to run */ | |
mac->ops.reset_hw(hw); | |
- /* | |
- * For parts with AMT enabled, let the firmware know | |
+ /* For parts with AMT enabled, let the firmware know | |
* that the network interface is in control | |
*/ | |
if (adapter->flags & FLAG_HAS_AMT) | |
@@ -3489,6 +3875,41 @@ | |
e1000e_reset_adaptive(hw); | |
+ /* initialize systim and reset the ns time counter */ | |
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); | |
+ | |
+ /* Set EEE advertisement as appropriate */ | |
+ if (adapter->flags2 & FLAG2_HAS_EEE) { | |
+ s32 ret_val; | |
+ u16 adv_addr; | |
+ | |
+ switch (hw->phy.type) { | |
+ case e1000_phy_82579: | |
+ adv_addr = I82579_EEE_ADVERTISEMENT; | |
+ break; | |
+ case e1000_phy_i217: | |
+ adv_addr = I217_EEE_ADVERTISEMENT; | |
+ break; | |
+ default: | |
+ dev_err(&adapter->pdev->dev, | |
+ "Invalid PHY type setting EEE advertisement\n"); | |
+ return; | |
+ } | |
+ | |
+ ret_val = hw->phy.ops.acquire(hw); | |
+ if (ret_val) { | |
+ dev_err(&adapter->pdev->dev, | |
+ "EEE advertisement - unable to acquire PHY\n"); | |
+ return; | |
+ } | |
+ | |
+ e1000_write_emi_reg_locked(hw, adv_addr, | |
+ hw->dev_spec.ich8lan.eee_disable ? | |
+ 0 : adapter->eee_advert); | |
+ | |
+ hw->phy.ops.release(hw); | |
+ } | |
+ | |
if (!netif_running(adapter->netdev) && | |
!test_bit(__E1000_TESTING, &adapter->state)) { | |
e1000_power_down_phy(adapter); | |
@@ -3500,8 +3921,7 @@ | |
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && | |
!(adapter->flags & FLAG_SMART_POWER_DOWN)) { | |
u16 phy_data = 0; | |
- /* | |
- * speed up time to link by disabling smart power down, ignore | |
+ /* speed up time to link by disabling smart power down, ignore | |
* the return value of this function because there is nothing | |
* different we would do if it failed | |
*/ | |
@@ -3548,6 +3968,15 @@ | |
/* execute the writes immediately */ | |
e1e_flush(); | |
+ | |
+ /* due to rare timing issues, write to TIDV/RDTR again to ensure the | |
+ * write is successful | |
+ */ | |
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | |
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | |
+ | |
+ /* execute the writes immediately */ | |
+ e1e_flush(); | |
} | |
static void e1000e_update_stats(struct e1000_adapter *adapter); | |
@@ -3558,8 +3987,7 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
u32 tctl, rctl; | |
- /* | |
- * signal that we're down so the interrupt handler does not | |
+ /* signal that we're down so the interrupt handler does not | |
* reschedule our watchdog timer | |
*/ | |
set_bit(__E1000_DOWN, &adapter->state); | |
@@ -3595,17 +4023,22 @@ | |
spin_unlock(&adapter->stats64_lock); | |
e1000e_flush_descriptors(adapter); | |
- e1000_clean_tx_ring(adapter); | |
- e1000_clean_rx_ring(adapter); | |
+ e1000_clean_tx_ring(adapter->tx_ring); | |
+ e1000_clean_rx_ring(adapter->rx_ring); | |
adapter->link_speed = 0; | |
adapter->link_duplex = 0; | |
+ /* Disable Si errata workaround on PCHx for jumbo frame flow */ | |
+ if ((hw->mac.type >= e1000_pch2lan) && | |
+ (adapter->netdev->mtu > ETH_DATA_LEN) && | |
+ e1000_lv_jumbo_workaround_ich8lan(hw, false)) | |
+ e_dbg("failed to disable jumbo frame workaround mode\n"); | |
+ | |
if (!pci_channel_offline(adapter->pdev)) | |
e1000e_reset(adapter); | |
- /* | |
- * TODO: for power management, we could drop the link and | |
+ /* TODO: for power management, we could drop the link and | |
* pci_disable_device here. | |
*/ | |
} | |
@@ -3621,6 +4054,24 @@ | |
} | |
/** | |
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) | |
+ * @cc: cyclecounter structure | |
+ **/ | |
+static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |
+{ | |
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, | |
+ cc); | |
+ struct e1000_hw *hw = &adapter->hw; | |
+ cycle_t systim; | |
+ | |
+ /* latch SYSTIMH on read of SYSTIML */ | |
+ systim = (cycle_t)er32(SYSTIML); | |
+ systim |= (cycle_t)er32(SYSTIMH) << 32; | |
+ | |
+ return systim; | |
+} | |
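The cycle counter above only returns raw SYSTIM ticks; the timecounter initialized in e1000e_config_hwtstamp converts deltas of those ticks into a running nanosecond count as ns += (delta * mult) >> shift. A toy standalone version of that conversion (the real mult/shift come from the SYSTIM base frequency via TIMINCA, so the values here are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct toy_timecounter {
        uint64_t cycle_last;    /* SYSTIM value at the previous read */
        uint64_t nsec;          /* accumulated nanoseconds */
        uint32_t mult;
        uint32_t shift;
};

static uint64_t toy_timecounter_read(struct toy_timecounter *tc, uint64_t now)
{
        uint64_t delta = now - tc->cycle_last;

        tc->nsec += (delta * tc->mult) >> tc->shift;
        tc->cycle_last = now;
        return tc->nsec;
}

int main(void)
{
        struct toy_timecounter tc = {
                .cycle_last = 0, .nsec = 0, .mult = 1, .shift = 0,
        };

        printf("t = %llu ns\n",
               (unsigned long long)toy_timecounter_read(&tc, 125000000));
        return 0;
}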
+ | |
+/** | |
* e1000_sw_init - Initialize general software structures (struct e1000_adapter) | |
* @adapter: board private structure to initialize | |
* | |
@@ -3628,7 +4079,7 @@ | |
* Fields are initialized based on PCI device information and | |
* OS network device settings (MTU size). | |
**/ | |
-static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |
+static int e1000_sw_init(struct e1000_adapter *adapter) | |
{ | |
struct net_device *netdev = adapter->netdev; | |
@@ -3636,6 +4087,8 @@ | |
adapter->rx_ps_bsize0 = 128; | |
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | |
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | |
+ adapter->tx_ring_count = E1000_DEFAULT_TXD; | |
+ adapter->rx_ring_count = E1000_DEFAULT_RXD; | |
spin_lock_init(&adapter->stats64_lock); | |
@@ -3644,6 +4097,17 @@ | |
if (e1000_alloc_queues(adapter)) | |
return -ENOMEM; | |
+ /* Setup hardware time stamping cyclecounter */ | |
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { | |
+ adapter->cc.read = e1000e_cyclecounter_read; | |
+ adapter->cc.mask = CLOCKSOURCE_MASK(64); | |
+ adapter->cc.mult = 1; | |
+ /* cc.shift set in e1000e_get_base_timinca() */ | |

+ | |
+ spin_lock_init(&adapter->systim_lock); | |
+ INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); | |
+ } | |
+ | |
/* Explicitly disable IRQ since the NIC can be in any state. */ | |
e1000_irq_disable(adapter); | |
@@ -3656,7 +4120,7 @@ | |
* @irq: interrupt number | |
* @data: pointer to a network interface device structure | |
**/ | |
-static irqreturn_t e1000_intr_msi_test(int irq, void *data) | |
+static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -3666,6 +4130,9 @@ | |
e_dbg("icr is %08X\n", icr); | |
if (icr & E1000_ICR_RXSEQ) { | |
adapter->flags &= ~FLAG_MSI_TEST_FAILED; | |
+ /* Force memory writes to complete before acknowledging the | |
+ * interrupt is handled. | |
+ */ | |
wmb(); | |
} | |
@@ -3693,7 +4160,8 @@ | |
e1000e_reset_interrupt_capability(adapter); | |
/* Assume that the test fails, if it succeeds then the test | |
- * MSI irq handler will unset this flag */ | |
+ * MSI irq handler will unset this flag | |
+ */ | |
adapter->flags |= FLAG_MSI_TEST_FAILED; | |
err = pci_enable_msi(adapter->pdev); | |
@@ -3707,6 +4175,9 @@ | |
goto msi_test_failed; | |
} | |
+ /* Force memory writes to complete before enabling and firing an | |
+ * interrupt. | |
+ */ | |
wmb(); | |
e1000_irq_enable(adapter); | |
@@ -3714,17 +4185,18 @@ | |
/* fire an unusual interrupt on the test handler */ | |
ew32(ICS, E1000_ICS_RXSEQ); | |
e1e_flush(); | |
- msleep(50); | |
+ msleep(100); | |
e1000_irq_disable(adapter); | |
- rmb(); | |
+ rmb(); /* read flags after interrupt has been fired */ | |
if (adapter->flags & FLAG_MSI_TEST_FAILED) { | |
adapter->int_mode = E1000E_INT_MODE_LEGACY; | |
e_info("MSI interrupt test failed, using legacy interrupt.\n"); | |
- } else | |
+ } else { | |
e_dbg("MSI interrupt test succeeded!\n"); | |
+ } | |
free_irq(adapter->pdev->irq, netdev); | |
pci_disable_msi(adapter->pdev); | |
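The MSI self-test above works by pessimism: FLAG_MSI_TEST_FAILED is set up front, a harmless RXSEQ interrupt is fired through ICS, and only the test handler running in interrupt context may clear the flag. A standalone sketch of that assume-failure pattern, with placeholder names in place of the real flag and register writes:

#include <stdbool.h>
#include <stdio.h>

static bool msi_test_failed;

static void test_handler(void)
{
        msi_test_failed = false;        /* interrupt was actually delivered */
}

/* stand-in for writing E1000_ICS_RXSEQ to ICS and waiting */
static void fire_test_interrupt(void (*handler)(void))
{
        handler();                      /* pretend the MSI arrived */
}

int main(void)
{
        msi_test_failed = true;         /* assume failure up front */
        fire_test_interrupt(test_handler);

        printf("MSI test %s\n", msi_test_failed ? "failed" : "succeeded");
        return 0;
}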
@@ -3794,17 +4266,16 @@ | |
netif_carrier_off(netdev); | |
/* allocate transmit descriptors */ | |
- err = e1000e_setup_tx_resources(adapter); | |
+ err = e1000e_setup_tx_resources(adapter->tx_ring); | |
if (err) | |
goto err_setup_tx; | |
/* allocate receive descriptors */ | |
- err = e1000e_setup_rx_resources(adapter); | |
+ err = e1000e_setup_rx_resources(adapter->rx_ring); | |
if (err) | |
goto err_setup_rx; | |
- /* | |
- * If AMT is enabled, let the firmware know that the network | |
+ /* If AMT is enabled, let the firmware know that the network | |
* interface is now open and reset the part to a known state. | |
*/ | |
if (adapter->flags & FLAG_HAS_AMT) { | |
@@ -3815,19 +4286,14 @@ | |
e1000e_power_up_phy(adapter); | |
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
- if ((adapter->hw.mng_cookie.status & | |
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) | |
+ if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) | |
e1000_update_mng_vlan(adapter); | |
- /* DMA latency requirement to workaround early-receive/jumbo issue */ | |
- if ((adapter->flags & FLAG_HAS_ERT) || | |
- (adapter->hw.mac.type == e1000_pch2lan)) | |
- pm_qos_add_request(&adapter->netdev->pm_qos_req, | |
- PM_QOS_CPU_DMA_LATENCY, | |
- PM_QOS_DEFAULT_VALUE); | |
+ /* DMA latency requirement to workaround jumbo issue */ | |
+ pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | |
+ PM_QOS_DEFAULT_VALUE); | |
- /* | |
- * before we allocate an interrupt, we must be ready to handle it. | |
+ /* before we allocate an interrupt, we must be ready to handle it. | |
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | |
* as soon as we call pci_request_irq, so we have to setup our | |
* clean_rx handler before we do so. | |
@@ -3838,8 +4304,7 @@ | |
if (err) | |
goto err_req_irq; | |
- /* | |
- * Work around PCIe errata with MSI interrupts causing some chipsets to | |
+ /* Work around PCIe errata with MSI interrupts causing some chipsets to | |
* ignore e1000e MSI messages, which means we need to test our MSI | |
* interrupt now | |
*/ | |
@@ -3862,6 +4327,7 @@ | |
netif_start_queue(netdev); | |
adapter->idle_check = true; | |
+ hw->mac.get_link_status = true; | |
pm_runtime_put(&pdev->dev); | |
/* fire a link status change interrupt to start the watchdog */ | |
@@ -3875,9 +4341,9 @@ | |
err_req_irq: | |
e1000e_release_hw_control(adapter); | |
e1000_power_down_phy(adapter); | |
- e1000e_free_rx_resources(adapter); | |
+ e1000e_free_rx_resources(adapter->rx_ring); | |
err_setup_rx: | |
- e1000e_free_tx_resources(adapter); | |
+ e1000e_free_tx_resources(adapter->tx_ring); | |
err_setup_tx: | |
e1000e_reset(adapter); | |
pm_runtime_put_sync(&pdev->dev); | |
@@ -3900,6 +4366,10 @@ | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct pci_dev *pdev = adapter->pdev; | |
+ int count = E1000_CHECK_RESET_COUNT; | |
+ | |
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--) | |
+ usleep_range(10000, 20000); | |
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | |
@@ -3914,33 +4384,30 @@ | |
e1000_power_down_phy(adapter); | |
- e1000e_free_tx_resources(adapter); | |
- e1000e_free_rx_resources(adapter); | |
+ e1000e_free_tx_resources(adapter->tx_ring); | |
+ e1000e_free_rx_resources(adapter->rx_ring); | |
- /* | |
- * kill manageability vlan ID if supported, but not if a vlan with | |
+ /* kill manageability vlan ID if supported, but not if a vlan with | |
* the same ID is registered on the host OS (let 8021q kill it) | |
*/ | |
- if (adapter->hw.mng_cookie.status & | |
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) | |
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | |
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) | |
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), | |
+ adapter->mng_vlan_id); | |
- /* | |
- * If AMT is enabled, let the firmware know that the network | |
+ /* If AMT is enabled, let the firmware know that the network | |
* interface is now closed | |
*/ | |
if ((adapter->flags & FLAG_HAS_AMT) && | |
!test_bit(__E1000_TESTING, &adapter->state)) | |
e1000e_release_hw_control(adapter); | |
- if ((adapter->flags & FLAG_HAS_ERT) || | |
- (adapter->hw.mac.type == e1000_pch2lan)) | |
- pm_qos_remove_request(&adapter->netdev->pm_qos_req); | |
+ pm_qos_remove_request(&adapter->netdev->pm_qos_req); | |
pm_runtime_put_sync(&pdev->dev); | |
return 0; | |
} | |
+ | |
/** | |
* e1000_set_mac - Change the Ethernet Address of the NIC | |
* @netdev: network interface device structure | |
@@ -3951,6 +4418,7 @@ | |
static int e1000_set_mac(struct net_device *netdev, void *p) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct e1000_hw *hw = &adapter->hw; | |
struct sockaddr *addr = p; | |
if (!is_valid_ether_addr(addr->sa_data)) | |
@@ -3959,23 +4427,21 @@ | |
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); | |
- e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); | |
+ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); | |
if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { | |
/* activate the work around */ | |
e1000e_set_laa_state_82571(&adapter->hw, 1); | |
- /* | |
- * Hold a copy of the LAA in RAR[14] This is done so that | |
+ /* Hold a copy of the LAA in RAR[14] This is done so that | |
* between the time RAR[0] gets clobbered and the time it | |
* gets fixed (in e1000_watchdog), the actual LAA is in one | |
* of the RARs and no incoming packets directed to this port | |
* are dropped. Eventually the LAA will be in RAR[0] and | |
* RAR[14] | |
*/ | |
- e1000e_rar_set(&adapter->hw, | |
- adapter->hw.mac.addr, | |
- adapter->hw.mac.rar_entry_count - 1); | |
+ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, | |
+ adapter->hw.mac.rar_entry_count - 1); | |
} | |
return 0; | |
@@ -3992,7 +4458,8 @@ | |
static void e1000e_update_phy_task(struct work_struct *work) | |
{ | |
struct e1000_adapter *adapter = container_of(work, | |
- struct e1000_adapter, update_phy_task); | |
+ struct e1000_adapter, | |
+ update_phy_task); | |
if (test_bit(__E1000_DOWN, &adapter->state)) | |
return; | |
@@ -4000,13 +4467,16 @@ | |
e1000_get_phy_info(&adapter->hw); | |
} | |
-/* | |
+/** | |
+ * e1000_update_phy_info - timer call-back to update PHY info | |
+ * @data: pointer to adapter cast into an unsigned long | |
+ * | |
* Need to wait a few seconds after link up to get diagnostic information from | |
* the phy | |
- */ | |
+ **/ | |
static void e1000_update_phy_info(unsigned long data) | |
{ | |
- struct e1000_adapter *adapter = (struct e1000_adapter *) data; | |
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data; | |
if (test_bit(__E1000_DOWN, &adapter->state)) | |
return; | |
@@ -4030,8 +4500,7 @@ | |
if (ret_val) | |
return; | |
- /* | |
- * A page set is expensive so check if already on desired page. | |
+ /* A page set is expensive so check if already on desired page. | |
* If not, set to the page with the PHY status registers. | |
*/ | |
hw->phy.addr = 1; | |
@@ -4102,8 +4571,7 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
struct pci_dev *pdev = adapter->pdev; | |
- /* | |
- * Prevent stats update while adapter is being reset, or if the pci | |
+ /* Prevent stats update while adapter is being reset, or if the pci | |
* connection is down. | |
*/ | |
if (adapter->link_speed == 0) | |
@@ -4114,7 +4582,7 @@ | |
adapter->stats.crcerrs += er32(CRCERRS); | |
adapter->stats.gprc += er32(GPRC); | |
adapter->stats.gorc += er32(GORCL); | |
- er32(GORCH); /* Clear gorc */ | |
+ er32(GORCH); /* Clear gorc */ | |
adapter->stats.bprc += er32(BPRC); | |
adapter->stats.mprc += er32(MPRC); | |
adapter->stats.roc += er32(ROC); | |
@@ -4147,7 +4615,7 @@ | |
adapter->stats.xofftxc += er32(XOFFTXC); | |
adapter->stats.gptc += er32(GPTC); | |
adapter->stats.gotc += er32(GOTCL); | |
- er32(GOTCH); /* Clear gotc */ | |
+ er32(GOTCH); /* Clear gotc */ | |
adapter->stats.rnbc += er32(RNBC); | |
adapter->stats.ruc += er32(RUC); | |
@@ -4171,23 +4639,20 @@ | |
/* Rx Errors */ | |
- /* | |
- * RLEC on some newer hardware can be incorrect so build | |
+ /* RLEC on some newer hardware can be incorrect so build | |
* our own version based on RUC and ROC | |
*/ | |
netdev->stats.rx_errors = adapter->stats.rxerrc + | |
- adapter->stats.crcerrs + adapter->stats.algnerrc + | |
- adapter->stats.ruc + adapter->stats.roc + | |
- adapter->stats.cexterr; | |
+ adapter->stats.crcerrs + adapter->stats.algnerrc + | |
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; | |
netdev->stats.rx_length_errors = adapter->stats.ruc + | |
- adapter->stats.roc; | |
+ adapter->stats.roc; | |
netdev->stats.rx_crc_errors = adapter->stats.crcerrs; | |
netdev->stats.rx_frame_errors = adapter->stats.algnerrc; | |
netdev->stats.rx_missed_errors = adapter->stats.mpc; | |
/* Tx Errors */ | |
- netdev->stats.tx_errors = adapter->stats.ecol + | |
- adapter->stats.latecol; | |
+ netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; | |
netdev->stats.tx_aborted_errors = adapter->stats.ecol; | |
netdev->stats.tx_window_errors = adapter->stats.latecol; | |
netdev->stats.tx_carrier_errors = adapter->stats.tncrs; | |
@@ -4198,6 +4663,16 @@ | |
adapter->stats.mgptc += er32(MGTPTC); | |
adapter->stats.mgprc += er32(MGTPRC); | |
adapter->stats.mgpdc += er32(MGTPDC); | |
+ | |
+ /* Correctable ECC Errors */ | |
+ if (hw->mac.type == e1000_pch_lpt) { | |
+ u32 pbeccsts = er32(PBECCSTS); | |
+ adapter->corr_errors += | |
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; | |
+ adapter->uncorr_errors += | |
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> | |
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; | |
+ } | |
} | |
/** | |
@@ -4209,23 +4684,23 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
struct e1000_phy_regs *phy = &adapter->phy_regs; | |
- if ((er32(STATUS) & E1000_STATUS_LU) && | |
+ if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && | |
+ (er32(STATUS) & E1000_STATUS_LU) && | |
(adapter->hw.phy.media_type == e1000_media_type_copper)) { | |
int ret_val; | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); | |
- ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); | |
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); | |
- ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); | |
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); | |
- ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); | |
- ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); | |
- ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); | |
+ ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); | |
+ ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); | |
+ ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); | |
+ ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); | |
+ ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); | |
+ ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); | |
+ ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); | |
if (ret_val) | |
e_warn("Error reading PHY register\n"); | |
} else { | |
- /* | |
- * Do not read PHY registers if link is not up | |
+ /* Do not read PHY registers if link is not up | |
* Set values to typical power-on defaults | |
*/ | |
phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); | |
@@ -4248,9 +4723,8 @@ | |
u32 ctrl = er32(CTRL); | |
/* Link status message must follow this format for user tools */ | |
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", | |
- adapter->netdev->name, | |
- adapter->link_speed, | |
+ pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", | |
+ adapter->netdev->name, adapter->link_speed, | |
adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", | |
(ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : | |
(ctrl & E1000_CTRL_RFCE) ? "Rx" : | |
@@ -4263,8 +4737,7 @@ | |
bool link_active = false; | |
s32 ret_val = 0; | |
- /* | |
- * get_link_status is set on LSC (link status) interrupt or | |
+ /* get_link_status is set on LSC (link status) interrupt or | |
* Rx sequence error interrupt. get_link_status will stay | |
* false until the check_for_link establishes link | |
* for copper adapters ONLY | |
@@ -4304,11 +4777,11 @@ | |
{ | |
/* make sure the receive unit is started */ | |
if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && | |
- (adapter->flags & FLAG_RX_RESTART_NOW)) { | |
+ (adapter->flags & FLAG_RESTART_NOW)) { | |
struct e1000_hw *hw = &adapter->hw; | |
u32 rctl = er32(RCTL); | |
ew32(RCTL, rctl | E1000_RCTL_EN); | |
- adapter->flags &= ~FLAG_RX_RESTART_NOW; | |
+ adapter->flags &= ~FLAG_RESTART_NOW; | |
} | |
} | |
@@ -4316,8 +4789,7 @@ | |
{ | |
struct e1000_hw *hw = &adapter->hw; | |
- /* | |
- * With 82574 controllers, PHY needs to be checked periodically | |
+ /* With 82574 controllers, PHY needs to be checked periodically | |
* for hung state and reset, if two calls return true | |
*/ | |
if (e1000_check_phy_82574(hw)) | |
@@ -4337,7 +4809,7 @@ | |
**/ | |
static void e1000_watchdog(unsigned long data) | |
{ | |
- struct e1000_adapter *adapter = (struct e1000_adapter *) data; | |
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data; | |
/* Do the rest outside of interrupt context */ | |
schedule_work(&adapter->watchdog_task); | |
@@ -4348,7 +4820,8 @@ | |
static void e1000_watchdog_task(struct work_struct *work) | |
{ | |
struct e1000_adapter *adapter = container_of(work, | |
- struct e1000_adapter, watchdog_task); | |
+ struct e1000_adapter, | |
+ watchdog_task); | |
struct net_device *netdev = adapter->netdev; | |
struct e1000_mac_info *mac = &adapter->hw.mac; | |
struct e1000_phy_info *phy = &adapter->hw.phy; | |
@@ -4382,24 +4855,30 @@ | |
/* update snapshot of PHY registers on LSC */ | |
e1000_phy_read_status(adapter); | |
mac->ops.get_link_up_info(&adapter->hw, | |
- &adapter->link_speed, | |
- &adapter->link_duplex); | |
+ &adapter->link_speed, | |
+ &adapter->link_duplex); | |
e1000_print_link_info(adapter); | |
- /* | |
- * On supported PHYs, check for duplex mismatch only | |
+ | |
+ /* check if SmartSpeed worked */ | |
+ e1000e_check_downshift(hw); | |
+ if (phy->speed_downgraded) | |
+ netdev_warn(netdev, | |
+ "Link Speed was downgraded by SmartSpeed\n"); | |
+ | |
+ /* On supported PHYs, check for duplex mismatch only | |
* if link has autonegotiated at 10/100 half | |
*/ | |
if ((hw->phy.type == e1000_phy_igp_3 || | |
hw->phy.type == e1000_phy_bm) && | |
- (hw->mac.autoneg == true) && | |
+ hw->mac.autoneg && | |
(adapter->link_speed == SPEED_10 || | |
adapter->link_speed == SPEED_100) && | |
(adapter->link_duplex == HALF_DUPLEX)) { | |
u16 autoneg_exp; | |
- e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); | |
+ e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); | |
- if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) | |
+ if (!(autoneg_exp & EXPANSION_NWAY)) | |
e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); | |
} | |
@@ -4416,8 +4895,7 @@ | |
break; | |
} | |
- /* | |
- * workaround: re-program speed mode bit after | |
+ /* workaround: re-program speed mode bit after | |
* link-up event | |
*/ | |
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && | |
@@ -4428,8 +4906,7 @@ | |
ew32(TARC(0), tarc0); | |
} | |
- /* | |
- * disable TSO for pcie and 10/100 speeds, to avoid | |
+ /* disable TSO for pcie and 10/100 speeds, to avoid | |
* some hardware issues | |
*/ | |
if (!(adapter->flags & FLAG_TSO_FORCE)) { | |
@@ -4450,16 +4927,14 @@ | |
} | |
} | |
- /* | |
- * enable transmits in the hardware, need to do this | |
+ /* enable transmits in the hardware, need to do this | |
* after setting TARC(0) | |
*/ | |
tctl = er32(TCTL); | |
tctl |= E1000_TCTL_EN; | |
ew32(TCTL, tctl); | |
- /* | |
- * Perform any post-link-up configuration before | |
+ /* Perform any post-link-up configuration before | |
* reporting link up. | |
*/ | |
if (phy->ops.cfg_on_link_up) | |
@@ -4476,18 +4951,25 @@ | |
adapter->link_speed = 0; | |
adapter->link_duplex = 0; | |
/* Link status message must follow this format */ | |
- printk(KERN_INFO "e1000e: %s NIC Link is Down\n", | |
- adapter->netdev->name); | |
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name); | |
netif_carrier_off(netdev); | |
if (!test_bit(__E1000_DOWN, &adapter->state)) | |
mod_timer(&adapter->phy_info_timer, | |
round_jiffies(jiffies + 2 * HZ)); | |
- if (adapter->flags & FLAG_RX_NEEDS_RESTART) | |
- schedule_work(&adapter->reset_task); | |
+ /* The link is lost so the controller stops DMA. | |
+ * If there is queued Tx work that cannot be done | |
+ * or if on an 80003ES2LAN which requires a Rx packet | |
+ * buffer work-around on link down event, reset the | |
+ * controller to flush the Tx/Rx packet buffers. | |
+ * (Do the reset outside of interrupt context). | |
+ */ | |
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || | |
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) | |
+ adapter->flags |= FLAG_RESTART_NOW; | |
else | |
pm_schedule_suspend(netdev->dev.parent, | |
- LINK_TIMEOUT); | |
+ LINK_TIMEOUT); | |
} | |
} | |
@@ -4506,35 +4988,27 @@ | |
adapter->gotc_old = adapter->stats.gotc; | |
spin_unlock(&adapter->stats64_lock); | |
- e1000e_update_adaptive(&adapter->hw); | |
- | |
- if (!netif_carrier_ok(netdev) && | |
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { | |
- /* | |
- * We've lost link, so the controller stops DMA, | |
- * but we've got queued Tx work that's never going | |
- * to get done, so reset controller to flush Tx. | |
- * (Do the reset outside of interrupt context). | |
- */ | |
+ if (adapter->flags & FLAG_RESTART_NOW) { | |
schedule_work(&adapter->reset_task); | |
/* return immediately since reset is imminent */ | |
return; | |
} | |
+ e1000e_update_adaptive(&adapter->hw); | |
+ | |
/* Simple mode for Interrupt Throttle Rate (ITR) */ | |
if (adapter->itr_setting == 4) { | |
- /* | |
- * Symmetric Tx/Rx gets a reduced ITR=2000; | |
+ /* Symmetric Tx/Rx gets a reduced ITR=2000; | |
* Total asymmetrical Tx or Rx gets ITR=8000; | |
* everyone else is between 2000-8000. | |
*/ | |
u32 goc = (adapter->gotc + adapter->gorc) / 10000; | |
u32 dif = (adapter->gotc > adapter->gorc ? | |
- adapter->gotc - adapter->gorc : | |
- adapter->gorc - adapter->gotc) / 10000; | |
+ adapter->gotc - adapter->gorc : | |
+ adapter->gorc - adapter->gotc) / 10000; | |
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; | |
- ew32(ITR, 1000000000 / (itr * 256)); | |
+ e1000e_write_itr(adapter, itr); | |
} | |
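In ITR simple mode (itr_setting == 4) the watchdog derives the target interrupt rate from the Tx/Rx byte counts of the last interval: symmetric traffic trends toward 2000 interrupts/s, fully one-sided traffic toward 8000, and everything else lands in between. A standalone calculation with hypothetical per-interval byte counts (the driver then hands the result to e1000e_write_itr):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical bytes moved during the last watchdog interval */
        uint64_t gotc = 800000000;      /* Tx octets */
        uint64_t gorc = 200000000;      /* Rx octets */

        uint32_t goc = (uint32_t)((gotc + gorc) / 10000);
        uint32_t dif = (uint32_t)((gotc > gorc ? gotc - gorc
                                               : gorc - gotc) / 10000);
        uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

        printf("itr = %u interrupts/sec\n", (unsigned)itr);     /* 5600 here */
        return 0;
}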
/* Cause software interrupt to ensure Rx ring is cleaned */ | |
@@ -4549,16 +5023,26 @@ | |
/* Force detection of hung controller every watchdog period */ | |
adapter->detect_tx_hung = true; | |
- /* | |
- * With 82571 controllers, LAA may be overwritten due to controller | |
+ /* With 82571 controllers, LAA may be overwritten due to controller | |
* reset from the other port. Set the appropriate LAA in RAR[0] | |
*/ | |
if (e1000e_get_laa_state_82571(hw)) | |
- e1000e_rar_set(hw, adapter->hw.mac.addr, 0); | |
+ hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); | |
if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) | |
e1000e_check_82574_phy_workaround(adapter); | |
+ /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ | |
+ if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { | |
+ if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && | |
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { | |
+ er32(RXSTMPH); | |
+ adapter->rx_hwtstamp_cleared++; | |
+ } else { | |
+ adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; | |
+ } | |
+ } | |
+ | |
/* Reset the timer */ | |
if (!test_bit(__E1000_DOWN, &adapter->state)) | |
mod_timer(&adapter->watchdog_timer, | |
@@ -4569,18 +5053,18 @@ | |
#define E1000_TX_FLAGS_VLAN 0x00000002 | |
#define E1000_TX_FLAGS_TSO 0x00000004 | |
#define E1000_TX_FLAGS_IPV4 0x00000008 | |
+#define E1000_TX_FLAGS_NO_FCS 0x00000010 | |
+#define E1000_TX_FLAGS_HWTSTAMP 0x00000020 | |
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | |
#define E1000_TX_FLAGS_VLAN_SHIFT 16 | |
-static int e1000_tso(struct e1000_adapter *adapter, | |
- struct sk_buff *skb) | |
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | |
{ | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
struct e1000_context_desc *context_desc; | |
struct e1000_buffer *buffer_info; | |
unsigned int i; | |
u32 cmd_length = 0; | |
- u16 ipcse = 0, tucse, mss; | |
+ u16 ipcse = 0, mss; | |
u8 ipcss, ipcso, tucss, tucso, hdr_len; | |
if (!skb_is_gso(skb)) | |
@@ -4600,36 +5084,35 @@ | |
iph->tot_len = 0; | |
iph->check = 0; | |
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | |
- 0, IPPROTO_TCP, 0); | |
+ 0, IPPROTO_TCP, 0); | |
cmd_length = E1000_TXD_CMD_IP; | |
ipcse = skb_transport_offset(skb) - 1; | |
} else if (skb_is_gso_v6(skb)) { | |
ipv6_hdr(skb)->payload_len = 0; | |
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
- &ipv6_hdr(skb)->daddr, | |
- 0, IPPROTO_TCP, 0); | |
+ &ipv6_hdr(skb)->daddr, | |
+ 0, IPPROTO_TCP, 0); | |
ipcse = 0; | |
} | |
ipcss = skb_network_offset(skb); | |
ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | |
tucss = skb_transport_offset(skb); | |
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | |
- tucse = 0; | |
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | |
- E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | |
i = tx_ring->next_to_use; | |
context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | |
buffer_info = &tx_ring->buffer_info[i]; | |
- context_desc->lower_setup.ip_fields.ipcss = ipcss; | |
- context_desc->lower_setup.ip_fields.ipcso = ipcso; | |
- context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | |
+ context_desc->lower_setup.ip_fields.ipcss = ipcss; | |
+ context_desc->lower_setup.ip_fields.ipcso = ipcso; | |
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | |
context_desc->upper_setup.tcp_fields.tucss = tucss; | |
context_desc->upper_setup.tcp_fields.tucso = tucso; | |
- context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | |
- context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | |
+ context_desc->upper_setup.tcp_fields.tucse = 0; | |
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | |
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | |
context_desc->cmd_and_length = cpu_to_le32(cmd_length); | |
@@ -4644,9 +5127,9 @@ | |
return 1; | |
} | |
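The context descriptor filled in above is driven entirely by byte offsets into the frame: ipcss/tucss mark where the IP and TCP headers start, and ipcso/tucso where their checksum fields live. For a plain Ethernet + IPv4 + TCP packet without a VLAN tag or IP options, those offsets come out as in this illustrative sketch (the driver derives them from the skb at runtime):

#include <stdio.h>

int main(void)
{
        const int eth_hlen = 14;        /* no VLAN tag */
        const int ip_hlen = 20;         /* no IP options */

        int ipcss = eth_hlen;                   /* start of IP header:  14 */
        int ipcso = eth_hlen + 10;              /* IP checksum field:   24 */
        int tucss = eth_hlen + ip_hlen;         /* start of TCP header: 34 */
        int tucso = eth_hlen + ip_hlen + 16;    /* TCP checksum field:  50 */

        printf("ipcss=%d ipcso=%d tucss=%d tucso=%d\n",
               ipcss, ipcso, tucss, tucso);
        return 0;
}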
-static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | |
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) | |
{ | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct e1000_context_desc *context_desc; | |
struct e1000_buffer *buffer_info; | |
unsigned int i; | |
@@ -4687,8 +5170,7 @@ | |
context_desc->lower_setup.ip_config = 0; | |
context_desc->upper_setup.tcp_fields.tucss = css; | |
- context_desc->upper_setup.tcp_fields.tucso = | |
- css + skb->csum_offset; | |
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; | |
context_desc->upper_setup.tcp_fields.tucse = 0; | |
context_desc->tcp_seg_setup.data = 0; | |
context_desc->cmd_and_length = cpu_to_le32(cmd_len); | |
@@ -4704,15 +5186,11 @@ | |
return 1; | |
} | |
-#define E1000_MAX_PER_TXD 8192 | |
-#define E1000_MAX_TXD_PWR 12 | |
- | |
-static int e1000_tx_map(struct e1000_adapter *adapter, | |
- struct sk_buff *skb, unsigned int first, | |
- unsigned int max_per_txd, unsigned int nr_frags, | |
- unsigned int mss) | |
+static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, | |
+ unsigned int first, unsigned int max_per_txd, | |
+ unsigned int nr_frags) | |
{ | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct pci_dev *pdev = adapter->pdev; | |
struct e1000_buffer *buffer_info; | |
unsigned int len = skb_headlen(skb); | |
@@ -4765,7 +5243,8 @@ | |
buffer_info->time_stamp = jiffies; | |
buffer_info->next_to_watch = i; | |
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, | |
- offset, size, DMA_TO_DEVICE); | |
+ offset, size, | |
+ DMA_TO_DEVICE); | |
buffer_info->mapped_as_page = true; | |
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | |
goto dma_error; | |
@@ -4798,16 +5277,15 @@ | |
i += tx_ring->count; | |
i--; | |
buffer_info = &tx_ring->buffer_info[i]; | |
- e1000_put_txbuf(adapter, buffer_info); | |
+ e1000_put_txbuf(tx_ring, buffer_info); | |
} | |
return 0; | |
} | |
-static void e1000_tx_queue(struct e1000_adapter *adapter, | |
- int tx_flags, int count) | |
+static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) | |
{ | |
- struct e1000_ring *tx_ring = adapter->tx_ring; | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
struct e1000_tx_desc *tx_desc = NULL; | |
struct e1000_buffer *buffer_info; | |
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | |
@@ -4815,7 +5293,7 @@ | |
if (tx_flags & E1000_TX_FLAGS_TSO) { | |
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | |
- E1000_TXD_CMD_TSE; | |
+ E1000_TXD_CMD_TSE; | |
txd_upper |= E1000_TXD_POPTS_TXSM << 8; | |
if (tx_flags & E1000_TX_FLAGS_IPV4) | |
@@ -4832,14 +5310,22 @@ | |
txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); | |
} | |
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) | |
+ txd_lower &= ~(E1000_TXD_CMD_IFCS); | |
+ | |
+ if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { | |
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; | |
+ txd_upper |= E1000_TXD_EXTCMD_TSTAMP; | |
+ } | |
+ | |
i = tx_ring->next_to_use; | |
do { | |
buffer_info = &tx_ring->buffer_info[i]; | |
tx_desc = E1000_TX_DESC(*tx_ring, i); | |
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | |
- tx_desc->lower.data = | |
- cpu_to_le32(txd_lower | buffer_info->length); | |
+ tx_desc->lower.data = cpu_to_le32(txd_lower | | |
+ buffer_info->length); | |
tx_desc->upper.data = cpu_to_le32(txd_upper); | |
i++; | |
@@ -4849,8 +5335,11 @@ | |
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | |
- /* | |
- * Force memory writes to complete before letting h/w | |
+ /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ | |
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) | |
+ tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); | |
+ | |
+ /* Force memory writes to complete before letting h/w | |
* know there are new descriptors to fetch. (Only | |
* applicable for weak-ordered memory model archs, | |
* such as IA-64). | |
@@ -4860,12 +5349,11 @@ | |
tx_ring->next_to_use = i; | |
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | |
- e1000e_update_tdt_wa(adapter, i); | |
+ e1000e_update_tdt_wa(tx_ring, i); | |
else | |
- writel(i, adapter->hw.hw_addr + tx_ring->tail); | |
+ writel(i, tx_ring->tail); | |
- /* | |
- * we need this if more than one processor can write to our tail | |
+ /* we need this if more than one processor can write to our tail | |
* at a time, it synchronizes IO on IA64/Altix systems | |
*/ | |
mmiowb(); | |
@@ -4875,24 +5363,23 @@ | |
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | |
struct sk_buff *skb) | |
{ | |
- struct e1000_hw *hw = &adapter->hw; | |
+ struct e1000_hw *hw = &adapter->hw; | |
u16 length, offset; | |
- if (vlan_tx_tag_present(skb)) { | |
- if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | |
- (adapter->hw.mng_cookie.status & | |
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) | |
- return 0; | |
- } | |
+ if (vlan_tx_tag_present(skb) && | |
+ !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | |
+ (adapter->hw.mng_cookie.status & | |
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) | |
+ return 0; | |
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) | |
return 0; | |
- if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) | |
+ if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) | |
return 0; | |
{ | |
- const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); | |
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); | |
struct udphdr *udp; | |
if (ip->protocol != IPPROTO_UDP) | |
@@ -4910,49 +5397,44 @@ | |
return 0; | |
} | |
-static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |
+static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) | |
{ | |
- struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct e1000_adapter *adapter = tx_ring->adapter; | |
- netif_stop_queue(netdev); | |
- /* | |
- * Herbert's original patch had: | |
+ netif_stop_queue(adapter->netdev); | |
+ /* Herbert's original patch had: | |
* smp_mb__after_netif_stop_queue(); | |
* but since that doesn't exist yet, just open code it. | |
*/ | |
smp_mb(); | |
- /* | |
- * We need to check again in a case another CPU has just | |
+ /* We need to check again in a case another CPU has just | |
* made room available. | |
*/ | |
- if (e1000_desc_unused(adapter->tx_ring) < size) | |
+ if (e1000_desc_unused(tx_ring) < size) | |
return -EBUSY; | |
/* A reprieve! */ | |
- netif_start_queue(netdev); | |
+ netif_start_queue(adapter->netdev); | |
++adapter->restart_queue; | |
return 0; | |
} | |
-static int e1000_maybe_stop_tx(struct net_device *netdev, int size) | |
+static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) | |
{ | |
- struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ BUG_ON(size > tx_ring->count); | |
- if (e1000_desc_unused(adapter->tx_ring) >= size) | |
+ if (e1000_desc_unused(tx_ring) >= size) | |
return 0; | |
- return __e1000_maybe_stop_tx(netdev, size); | |
+ return __e1000_maybe_stop_tx(tx_ring, size); | |
} | |
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) | |
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |
struct net_device *netdev) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
struct e1000_ring *tx_ring = adapter->tx_ring; | |
unsigned int first; | |
- unsigned int max_per_txd = E1000_MAX_PER_TXD; | |
- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | |
unsigned int tx_flags = 0; | |
unsigned int len = skb_headlen(skb); | |
unsigned int nr_frags; | |
@@ -4971,34 +5453,32 @@ | |
return NETDEV_TX_OK; | |
} | |
- mss = skb_shinfo(skb)->gso_size; | |
- /* | |
- * The controller does a simple calculation to | |
- * make sure there is enough room in the FIFO before | |
- * initiating the DMA for each buffer. The calc is: | |
- * 4 = ceil(buffer len/mss). To make sure we don't | |
- * overrun the FIFO, adjust the max buffer len if mss | |
- * drops. | |
+ /* The minimum packet size with TCTL.PSP set is 17 bytes so | |
+ * pad skb in order to meet this minimum size requirement | |
*/ | |
+ if (unlikely(skb->len < 17)) { | |
+ if (skb_pad(skb, 17 - skb->len)) | |
+ return NETDEV_TX_OK; | |
+ skb->len = 17; | |
+ skb_set_tail_pointer(skb, 17); | |
+ } | |
+ | |
+ mss = skb_shinfo(skb)->gso_size; | |
if (mss) { | |
u8 hdr_len; | |
- max_per_txd = min(mss << 2, max_per_txd); | |
- max_txd_pwr = fls(max_per_txd) - 1; | |
- /* | |
- * TSO Workaround for 82571/2/3 Controllers -- if skb->data | |
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data | |
* points to just header, pull a few bytes of payload from | |
* frags into skb->data | |
*/ | |
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
- /* | |
- * we do this workaround for ES2LAN, but it is un-necessary, | |
+ /* we do this workaround for ES2LAN, but it is un-necessary, | |
* avoiding it could save a lot of cycles | |
*/ | |
if (skb->data_len && (hdr_len == len)) { | |
unsigned int pull_size; | |
- pull_size = min((unsigned int)4, skb->data_len); | |
+ pull_size = min_t(unsigned int, 4, skb->data_len); | |
if (!__pskb_pull_tail(skb, pull_size)) { | |
e_err("__pskb_pull_tail failed.\n"); | |
dev_kfree_skb_any(skb); | |
@@ -5013,21 +5493,20 @@ | |
count++; | |
count++; | |
- count += TXD_USE_COUNT(len, max_txd_pwr); | |
+ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); | |
nr_frags = skb_shinfo(skb)->nr_frags; | |
for (f = 0; f < nr_frags; f++) | |
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), | |
- max_txd_pwr); | |
+ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), | |
+ adapter->tx_fifo_limit); | |
if (adapter->hw.mac.tx_pkt_filtering) | |
e1000_transfer_dhcp_info(adapter, skb); | |
- /* | |
- * need: count + 2 desc gap to keep tail from touching | |
+ /* need: count + 2 desc gap to keep tail from touching | |
* head, otherwise try next time | |
*/ | |
- if (e1000_maybe_stop_tx(netdev, count + 2)) | |
+ if (e1000_maybe_stop_tx(tx_ring, count + 2)) | |
return NETDEV_TX_BUSY; | |
if (vlan_tx_tag_present(skb)) { | |
@@ -5037,7 +5516,7 @@ | |
first = tx_ring->next_to_use; | |
- tso = e1000_tso(adapter, skb); | |
+ tso = e1000_tso(tx_ring, skb); | |
if (tso < 0) { | |
dev_kfree_skb_any(skb); | |
return NETDEV_TX_OK; | |
@@ -5045,25 +5524,40 @@ | |
if (tso) | |
tx_flags |= E1000_TX_FLAGS_TSO; | |
- else if (e1000_tx_csum(adapter, skb)) | |
+ else if (e1000_tx_csum(tx_ring, skb)) | |
tx_flags |= E1000_TX_FLAGS_CSUM; | |
- /* | |
- * Old method was to assume IPv4 packet by default if TSO was enabled. | |
+ /* Old method was to assume IPv4 packet by default if TSO was enabled. | |
* 82571 hardware supports TSO capabilities for IPv6 as well... | |
* no longer assume, we must. | |
*/ | |
if (skb->protocol == htons(ETH_P_IP)) | |
tx_flags |= E1000_TX_FLAGS_IPV4; | |
+ if (unlikely(skb->no_fcs)) | |
+ tx_flags |= E1000_TX_FLAGS_NO_FCS; | |
+ | |
/* if count is 0 then mapping error has occurred */ | |
- count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); | |
+ count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, | |
+ nr_frags); | |
if (count) { | |
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
+ !adapter->tx_hwtstamp_skb)) { | |
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP; | |
+ adapter->tx_hwtstamp_skb = skb_get(skb); | |
+ schedule_work(&adapter->tx_hwtstamp_work); | |
+ } else { | |
+ skb_tx_timestamp(skb); | |
+ } | |
+ | |
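The timestamp branch above allows at most one Tx packet to wait for a hardware timestamp at a time (tx_hwtstamp_skb acts as a single in-flight slot); any further requests fall back to software stamping via skb_tx_timestamp(). A toy sketch of that single-slot policy with placeholder types:

#include <stdbool.h>
#include <stdio.h>

struct packet { int id; };

static struct packet *tx_hwtstamp_pkt;          /* the single in-flight slot */

static void transmit(struct packet *pkt, bool want_hw_tstamp)
{
        if (want_hw_tstamp && !tx_hwtstamp_pkt) {
                tx_hwtstamp_pkt = pkt;          /* claim the slot */
                printf("packet %d: hardware timestamp requested\n", pkt->id);
        } else {
                printf("packet %d: software timestamp\n", pkt->id);
        }
}

int main(void)
{
        struct packet a = { 1 }, b = { 2 };

        transmit(&a, true);     /* gets the hardware timestamp slot */
        transmit(&b, true);     /* slot busy, falls back to software */
        return 0;
}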
netdev_sent_queue(netdev, skb->len); | |
- e1000_tx_queue(adapter, tx_flags, count); | |
+ e1000_tx_queue(tx_ring, tx_flags, count); | |
/* Make sure there is space in the ring for the next send. */ | |
- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); | |
- | |
+ e1000_maybe_stop_tx(tx_ring, | |
+ (MAX_SKB_FRAGS * | |
+ DIV_ROUND_UP(PAGE_SIZE, | |
+ adapter->tx_fifo_limit) + 2)); | |
} else { | |
dev_kfree_skb_any(skb); | |
tx_ring->buffer_info[first].time_stamp = 0; | |
@@ -5095,10 +5589,9 @@ | |
if (test_bit(__E1000_DOWN, &adapter->state)) | |
return; | |
- if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | |
- (adapter->flags & FLAG_RX_RESTART_NOW))) { | |
+ if (!(adapter->flags & FLAG_RESTART_NOW)) { | |
e1000e_dump(adapter); | |
- e_err("Reset adapter\n"); | |
+ e_err("Reset adapter unexpectedly\n"); | |
} | |
e1000e_reinit_locked(adapter); | |
} | |
@@ -5111,7 +5604,7 @@ | |
* Returns the address of the device statistics structure. | |
**/ | |
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, | |
- struct rtnl_link_stats64 *stats) | |
+ struct rtnl_link_stats64 *stats) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -5128,23 +5621,19 @@ | |
/* Rx Errors */ | |
- /* | |
- * RLEC on some newer hardware can be incorrect so build | |
+ /* RLEC on some newer hardware can be incorrect so build | |
* our own version based on RUC and ROC | |
*/ | |
stats->rx_errors = adapter->stats.rxerrc + | |
- adapter->stats.crcerrs + adapter->stats.algnerrc + | |
- adapter->stats.ruc + adapter->stats.roc + | |
- adapter->stats.cexterr; | |
- stats->rx_length_errors = adapter->stats.ruc + | |
- adapter->stats.roc; | |
+ adapter->stats.crcerrs + adapter->stats.algnerrc + | |
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; | |
+ stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; | |
stats->rx_crc_errors = adapter->stats.crcerrs; | |
stats->rx_frame_errors = adapter->stats.algnerrc; | |
stats->rx_missed_errors = adapter->stats.mpc; | |
/* Tx Errors */ | |
- stats->tx_errors = adapter->stats.ecol + | |
- adapter->stats.latecol; | |
+ stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; | |
stats->tx_aborted_errors = adapter->stats.ecol; | |
stats->tx_window_errors = adapter->stats.latecol; | |
stats->tx_carrier_errors = adapter->stats.tncrs; | |
@@ -5181,22 +5670,14 @@ | |
return -EINVAL; | |
} | |
- /* Jumbo frame workaround on 82579 requires CRC be stripped */ | |
- if ((adapter->hw.mac.type == e1000_pch2lan) && | |
+ /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ | |
+ if ((adapter->hw.mac.type >= e1000_pch2lan) && | |
!(adapter->flags2 & FLAG2_CRC_STRIPPING) && | |
(new_mtu > ETH_DATA_LEN)) { | |
- e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); | |
+ e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); | |
return -EINVAL; | |
} | |
- /* 82573 Errata 17 */ | |
- if (((adapter->hw.mac.type == e1000_82573) || | |
- (adapter->hw.mac.type == e1000_82574)) && | |
- (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { | |
- adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; | |
- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); | |
- } | |
- | |
while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | |
usleep_range(1000, 2000); | |
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ | |
@@ -5206,8 +5687,7 @@ | |
if (netif_running(netdev)) | |
e1000e_down(adapter); | |
- /* | |
- * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | |
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | |
* means we reserve 2 more, this pushes us to allocate from the next | |
* larger slab size. | |
* i.e. RXBUFFER_2048 --> size-4096 slab | |
@@ -5222,9 +5702,9 @@ | |
/* adjust allocation if LPE protects us, and we aren't using SBP */ | |
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | |
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | |
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | |
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | |
- + ETH_FCS_LEN; | |
+ + ETH_FCS_LEN; | |
if (netif_running(netdev)) | |
e1000e_up(adapter); | |
@@ -5294,6 +5774,65 @@ | |
return 0; | |
} | |
+/** | |
+ * e1000e_hwtstamp_ioctl - control hardware time stamping | |
+ * @netdev: network interface device structure | |
+ * @ifreq: interface request | |
+ * | |
+ * Outgoing time stamping can be enabled and disabled. Play nice and | |
+ * disable it when requested, although it shouldn't cause any overhead | |
+ * when no packet needs it. At most one packet in the queue may be | |
+ * marked for time stamping, otherwise it would be impossible to tell | |
+ * for sure to which packet the hardware time stamp belongs. | |
+ * | |
+ * Incoming time stamping has to be configured via the hardware filters. | |
+ * Not all combinations are supported, in particular event type has to be | |
+ * specified. Matching the kind of event packet is not supported, with the | |
+ * exception of "all V2 events regardless of level 2 or 4". | |
+ **/ | |
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) | |
+{ | |
+ struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ struct hwtstamp_config config; | |
+ int ret_val; | |
+ | |
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | |
+ return -EFAULT; | |
+ | |
+ ret_val = e1000e_config_hwtstamp(adapter, &config); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ switch (config.rx_filter) { | |
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
+ case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
+ /* With V2 type filters which specify a Sync or Delay Request, | |
+ * Path Delay Request/Response messages are also time stamped | |
+ * by hardware so notify the caller the requested packets plus | |
+ * some others are time stamped. | |
+ */ | |
+ config.rx_filter = HWTSTAMP_FILTER_SOME; | |
+ break; | |
+ default: | |
+ break; | |
+ } | |
+ | |
+ return copy_to_user(ifr->ifr_data, &config, | |
+ sizeof(config)) ? -EFAULT : 0; | |
+} | |
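From user space, the path above is reached with the SIOCSHWTSTAMP ioctl on any socket, passing a struct hwtstamp_config through ifreq.ifr_data; the driver may widen the granted rx_filter (for example to HWTSTAMP_FILTER_SOME), so callers should read the structure back. A hypothetical example for an interface named "eth0":

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;                   /* Tx timestamping on */
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;   /* all V2 events */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("granted rx_filter = %d\n", cfg.rx_filter);

        close(fd);
        return 0;
}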
+ | |
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) | |
+{ | |
+ struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ | |
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, | |
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; | |
+} | |
+ | |
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
{ | |
switch (cmd) { | |
@@ -5301,6 +5840,10 @@ | |
case SIOCGMIIREG: | |
case SIOCSMIIREG: | |
return e1000_mii_ioctl(netdev, ifr, cmd); | |
+ case SIOCSHWTSTAMP: | |
+ return e1000e_hwtstamp_set(netdev, ifr); | |
+ case SIOCGHWTSTAMP: | |
+ return e1000e_hwtstamp_get(netdev, ifr); | |
default: | |
return -EOPNOTSUPP; | |
} | |
@@ -5311,7 +5854,7 @@ | |
struct e1000_hw *hw = &adapter->hw; | |
u32 i, mac_reg; | |
u16 phy_reg, wuc_enable; | |
- int retval = 0; | |
+ int retval; | |
/* copy MAC RARs to PHY RARs */ | |
e1000_copy_rx_addrs_to_phy_ich8lan(hw); | |
@@ -5325,7 +5868,7 @@ | |
/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ | |
retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); | |
if (retval) | |
- goto out; | |
+ goto release; | |
/* copy MAC MTA to PHY MTA - only needed for pchlan */ | |
for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { | |
@@ -5346,7 +5889,7 @@ | |
phy_reg &= ~(BM_RCTL_MO_MASK); | |
if (mac_reg & E1000_RCTL_MO_3) | |
phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) | |
- << BM_RCTL_MO_SHIFT); | |
+ << BM_RCTL_MO_SHIFT); | |
if (mac_reg & E1000_RCTL_BAM) | |
phy_reg |= BM_RCTL_BAM; | |
if (mac_reg & E1000_RCTL_PMCF) | |
@@ -5369,14 +5912,13 @@ | |
retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); | |
if (retval) | |
e_err("Could not set PHY Host Wakeup bit\n"); | |
-out: | |
+release: | |
hw->phy.ops.release(hw); | |
return retval; | |
} | |
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | |
- bool runtime) | |
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | |
{ | |
struct net_device *netdev = pci_get_drvdata(pdev); | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -5389,16 +5931,17 @@ | |
netif_device_detach(netdev); | |
if (netif_running(netdev)) { | |
+ int count = E1000_CHECK_RESET_COUNT; | |
+ | |
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--) | |
+ usleep_range(10000, 20000); | |
+ | |
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | |
e1000e_down(adapter); | |
e1000_free_irq(adapter); | |
} | |
e1000e_reset_interrupt_capability(adapter); | |
- retval = pci_save_state(pdev); | |
- if (retval) | |
- return retval; | |
- | |
status = er32(STATUS); | |
if (status & E1000_STATUS_LU) | |
wufc &= ~E1000_WUFC_LNKC; | |
@@ -5415,10 +5958,6 @@ | |
} | |
ctrl = er32(CTRL); | |
- /* advertise wake from D3Cold */ | |
- #define E1000_CTRL_ADVD3WUC 0x00100000 | |
- /* phy power management enable */ | |
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | |
ctrl |= E1000_CTRL_ADVD3WUC; | |
if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) | |
ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; | |
@@ -5454,103 +5993,112 @@ | |
ew32(WUFC, 0); | |
} | |
- *enable_wake = !!wufc; | |
- | |
- /* make sure adapter isn't asleep if manageability is enabled */ | |
- if ((adapter->flags & FLAG_MNG_PT_ENABLED) || | |
- (hw->mac.ops.check_mng_mode(hw))) | |
- *enable_wake = true; | |
- | |
if (adapter->hw.phy.type == e1000_phy_igp_3) | |
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | |
- /* | |
- * Release control of h/w to f/w. If f/w is AMT enabled, this | |
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this | |
* would have already happened in close and is redundant. | |
*/ | |
e1000e_release_hw_control(adapter); | |
- pci_disable_device(pdev); | |
- | |
- return 0; | |
-} | |
- | |
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) | |
-{ | |
- if (sleep && wake) { | |
- pci_prepare_to_sleep(pdev); | |
- return; | |
- } | |
- | |
- pci_wake_from_d3(pdev, wake); | |
- pci_set_power_state(pdev, PCI_D3hot); | |
-} | |
- | |
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | |
- bool wake) | |
-{ | |
- struct net_device *netdev = pci_get_drvdata(pdev); | |
- struct e1000_adapter *adapter = netdev_priv(netdev); | |
+ pci_clear_master(pdev); | |
- /* | |
- * The pci-e switch on some quad port adapters will report a | |
+ /* The pci-e switch on some quad port adapters will report a | |
* correctable error when the MAC transitions from D0 to D3. To | |
* prevent this we need to mask off the correctable errors on the | |
* downstream port of the pci-e switch. | |
+ * | |
+ * We don't have the associated upstream bridge while assigning | |
+ * the PCI device into guest. For example, the KVM on power is | |
+ * one of the cases. | |
*/ | |
if (adapter->flags & FLAG_IS_QUAD_PORT) { | |
struct pci_dev *us_dev = pdev->bus->self; | |
- int pos = pci_pcie_cap(us_dev); | |
u16 devctl; | |
- pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); | |
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, | |
- (devctl & ~PCI_EXP_DEVCTL_CERE)); | |
+ if (!us_dev) | |
+ return 0; | |
- e1000_power_off(pdev, sleep, wake); | |
+ pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); | |
+ pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, | |
+ (devctl & ~PCI_EXP_DEVCTL_CERE)); | |
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); | |
- } else { | |
- e1000_power_off(pdev, sleep, wake); | |
+ pci_save_state(pdev); | |
+ pci_prepare_to_sleep(pdev); | |
+ | |
+ pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); | |
} | |
+ | |
+ return 0; | |
} | |
-#ifdef CONFIG_PCIEASPM | |
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |
+/** | |
+ * e1000e_disable_aspm - Disable ASPM states | |
+ * @pdev: pointer to PCI device struct | |
+ * @state: bit-mask of ASPM states to disable | |
+ * | |
+ * Some devices *must* have certain ASPM states disabled per hardware errata. | |
+ **/ | |
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |
{ | |
+ struct pci_dev *parent = pdev->bus->self; | |
+ u16 aspm_dis_mask = 0; | |
+ u16 pdev_aspmc, parent_aspmc; | |
+ | |
+ switch (state) { | |
+ case PCIE_LINK_STATE_L0S: | |
+ case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: | |
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; | |
+ /* fall-through - can't have L1 without L0s */ | |
+ case PCIE_LINK_STATE_L1: | |
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; | |
+ break; | |
+ default: | |
+ return; | |
+ } | |
+ | |
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); | |
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; | |
+ | |
+ if (parent) { | |
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, | |
+ &parent_aspmc); | |
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; | |
+ } | |
+ | |
+ /* Nothing to do if the ASPM states to be disabled already are */ | |
+ if (!(pdev_aspmc & aspm_dis_mask) && | |
+ (!parent || !(parent_aspmc & aspm_dis_mask))) | |
+ return; | |
+ | |
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n", | |
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? | |
+ "L0s" : "", | |
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? | |
+ "L1" : ""); | |
+ | |
+#ifdef CONFIG_PCIEASPM | |
pci_disable_link_state_locked(pdev, state); | |
-} | |
-#else | |
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |
-{ | |
- int pos; | |
- u16 reg16; | |
- /* | |
- * Both device and parent should have the same ASPM setting. | |
- * Disable ASPM in downstream component first and then upstream. | |
+ /* Double-check ASPM control. If not disabled by the above, the | |
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is | |
+ * not enabled); override by writing PCI config space directly. | |
*/ | |
- pos = pci_pcie_cap(pdev); | |
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | |
- reg16 &= ~state; | |
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | |
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); | |
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; | |
- if (!pdev->bus->self) | |
+ if (!(aspm_dis_mask & pdev_aspmc)) | |
return; | |
- | |
- pos = pci_pcie_cap(pdev->bus->self); | |
- pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); | |
- reg16 &= ~state; | |
- pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); | |
-} | |
#endif | |
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |
-{ | |
- dev_info(&pdev->dev, "Disabling ASPM %s %s\n", | |
- (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", | |
- (state & PCIE_LINK_STATE_L1) ? "L1" : ""); | |
- __e1000e_disable_aspm(pdev, state); | |
+ /* Both device and parent should have the same ASPM setting. | |
+ * Disable ASPM in downstream component first and then upstream. | |
+ */ | |
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); | |
+ | |
+ if (parent) | |
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, | |
+ aspm_dis_mask); | |
} | |
#ifdef CONFIG_PM | |
@@ -5574,9 +6122,7 @@ | |
if (aspm_disable_flag) | |
e1000e_disable_aspm(pdev, aspm_disable_flag); | |
- pci_set_power_state(pdev, PCI_D0); | |
- pci_restore_state(pdev); | |
- pci_save_state(pdev); | |
+ pci_set_master(pdev); | |
e1000e_set_interrupt_capability(adapter); | |
if (netif_running(netdev)) { | |
@@ -5585,7 +6131,7 @@ | |
return err; | |
} | |
- if (hw->mac.type == e1000_pch2lan) | |
+ if (hw->mac.type >= e1000_pch2lan) | |
e1000_resume_workarounds_pchlan(&adapter->hw); | |
e1000e_power_up_phy(adapter); | |
@@ -5597,24 +6143,24 @@ | |
e1e_rphy(&adapter->hw, BM_WUS, &phy_data); | |
if (phy_data) { | |
e_info("PHY Wakeup cause - %s\n", | |
- phy_data & E1000_WUS_EX ? "Unicast Packet" : | |
- phy_data & E1000_WUS_MC ? "Multicast Packet" : | |
- phy_data & E1000_WUS_BC ? "Broadcast Packet" : | |
- phy_data & E1000_WUS_MAG ? "Magic Packet" : | |
- phy_data & E1000_WUS_LNKC ? | |
- "Link Status Change" : "other"); | |
+ phy_data & E1000_WUS_EX ? "Unicast Packet" : | |
+ phy_data & E1000_WUS_MC ? "Multicast Packet" : | |
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" : | |
+ phy_data & E1000_WUS_MAG ? "Magic Packet" : | |
+ phy_data & E1000_WUS_LNKC ? | |
+ "Link Status Change" : "other"); | |
} | |
e1e_wphy(&adapter->hw, BM_WUS, ~0); | |
} else { | |
u32 wus = er32(WUS); | |
if (wus) { | |
e_info("MAC Wakeup cause - %s\n", | |
- wus & E1000_WUS_EX ? "Unicast Packet" : | |
- wus & E1000_WUS_MC ? "Multicast Packet" : | |
- wus & E1000_WUS_BC ? "Broadcast Packet" : | |
- wus & E1000_WUS_MAG ? "Magic Packet" : | |
- wus & E1000_WUS_LNKC ? "Link Status Change" : | |
- "other"); | |
+ wus & E1000_WUS_EX ? "Unicast Packet" : | |
+ wus & E1000_WUS_MC ? "Multicast Packet" : | |
+ wus & E1000_WUS_BC ? "Broadcast Packet" : | |
+ wus & E1000_WUS_MAG ? "Magic Packet" : | |
+ wus & E1000_WUS_LNKC ? "Link Status Change" : | |
+ "other"); | |
} | |
ew32(WUS, ~0); | |
} | |
@@ -5628,8 +6174,7 @@ | |
netif_device_attach(netdev); | |
- /* | |
- * If the controller has AMT, do not set DRV_LOAD until the interface | |
+ /* If the controller has AMT, do not set DRV_LOAD until the interface | |
* is up. For all other cases, let the f/w know that the h/w is now | |
* under the control of the driver. | |
*/ | |
@@ -5643,14 +6188,8 @@ | |
static int e1000_suspend(struct device *dev) | |
{ | |
struct pci_dev *pdev = to_pci_dev(dev); | |
- int retval; | |
- bool wake; | |
- | |
- retval = __e1000_shutdown(pdev, &wake, false); | |
- if (!retval) | |
- e1000_complete_shutdown(pdev, true, wake); | |
- return retval; | |
+ return __e1000_shutdown(pdev, false); | |
} | |
static int e1000_resume(struct device *dev) | |
@@ -5673,13 +6212,10 @@ | |
struct net_device *netdev = pci_get_drvdata(pdev); | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
- if (e1000e_pm_ready(adapter)) { | |
- bool wake; | |
- | |
- __e1000_shutdown(pdev, &wake, true); | |
- } | |
+ if (!e1000e_pm_ready(adapter)) | |
+ return 0; | |
- return 0; | |
+ return __e1000_shutdown(pdev, true); | |
} | |
static int e1000_idle(struct device *dev) | |
@@ -5717,17 +6253,12 @@ | |
static void e1000_shutdown(struct pci_dev *pdev) | |
{ | |
- bool wake = false; | |
- | |
- __e1000_shutdown(pdev, &wake, false); | |
- | |
- if (system_state == SYSTEM_POWER_OFF) | |
- e1000_complete_shutdown(pdev, false, wake); | |
+ __e1000_shutdown(pdev, false); | |
} | |
#ifdef CONFIG_NET_POLL_CONTROLLER | |
-static irqreturn_t e1000_intr_msix(int irq, void *data) | |
+static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) | |
{ | |
struct net_device *netdev = data; | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
@@ -5757,7 +6288,10 @@ | |
return IRQ_HANDLED; | |
} | |
-/* | |
+/** | |
+ * e1000_netpoll | |
+ * @netdev: network interface device structure | |
+ * | |
* Polling 'interrupt' - used by things like netconsole to send skbs | |
* without having to re-enable interrupts. It's not called while | |
* the interrupt routine is executing. | |
@@ -5775,7 +6309,7 @@ | |
e1000_intr_msi(adapter->pdev->irq, netdev); | |
enable_irq(adapter->pdev->irq); | |
break; | |
- default: /* E1000E_INT_MODE_LEGACY */ | |
+ default: /* E1000E_INT_MODE_LEGACY */ | |
disable_irq(adapter->pdev->irq); | |
e1000_intr(adapter->pdev->irq, netdev); | |
enable_irq(adapter->pdev->irq); | |
@@ -5840,9 +6374,9 @@ | |
"Cannot re-enable PCI device after reset.\n"); | |
result = PCI_ERS_RESULT_DISCONNECT; | |
} else { | |
- pci_set_master(pdev); | |
pdev->state_saved = true; | |
pci_restore_state(pdev); | |
+ pci_set_master(pdev); | |
pci_enable_wake(pdev, PCI_D3hot, 0); | |
pci_enable_wake(pdev, PCI_D3cold, 0); | |
@@ -5882,14 +6416,12 @@ | |
netif_device_attach(netdev); | |
- /* | |
- * If the controller has AMT, do not set DRV_LOAD until the interface | |
+ /* If the controller has AMT, do not set DRV_LOAD until the interface | |
* is up. For all other cases, let the f/w know that the h/w is now | |
* under the control of the driver. | |
*/ | |
if (!(adapter->flags & FLAG_HAS_AMT)) | |
e1000e_get_hw_control(adapter); | |
- | |
} | |
static void e1000_print_device_info(struct e1000_adapter *adapter) | |
@@ -5903,7 +6435,7 @@ | |
e_info("(PCI Express:2.5GT/s:%s) %pM\n", | |
/* bus width */ | |
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : | |
- "Width x1"), | |
+ "Width x1"), | |
/* MAC address */ | |
netdev->dev_addr); | |
e_info("Intel(R) PRO/%s Network Connection\n", | |
@@ -5911,7 +6443,7 @@ | |
ret_val = e1000_read_pba_string_generic(hw, pba_str, | |
E1000_PBANUM_LENGTH); | |
if (ret_val) | |
- strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); | |
+ strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); | |
e_info("MAC: %d, PHY: %d, PBA No: %s\n", | |
hw->mac.type, hw->phy.type, pba_str); | |
} | |
@@ -5926,7 +6458,8 @@ | |
return; | |
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); | |
- if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { | |
+ le16_to_cpus(&buf); | |
+ if (!ret_val && (!(buf & (1 << 0)))) { | |
/* Deep Smart Power Down (DSPD) */ | |
dev_warn(&adapter->pdev->dev, | |
"Warning: detected DSPD enabled in EEPROM\n"); | |
@@ -5934,7 +6467,7 @@ | |
} | |
static int e1000_set_features(struct net_device *netdev, | |
- netdev_features_t features) | |
+ netdev_features_t features) | |
{ | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
netdev_features_t changed = features ^ netdev->features; | |
@@ -5942,10 +6475,27 @@ | |
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) | |
adapter->flags |= FLAG_TSO_FORCE; | |
- if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | | |
- NETIF_F_RXCSUM))) | |
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | | |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | | |
+ NETIF_F_RXALL))) | |
return 0; | |
+ if (changed & NETIF_F_RXFCS) { | |
+ if (features & NETIF_F_RXFCS) { | |
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING; | |
+ } else { | |
+ /* We need to take it back to defaults, which might mean | |
+ * stripping is still disabled at the adapter level. | |
+ */ | |
+ if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) | |
+ adapter->flags2 |= FLAG2_CRC_STRIPPING; | |
+ else | |
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING; | |
+ } | |
+ } | |
+ | |
+ netdev->features = features; | |
+ | |
if (netif_running(netdev)) | |
e1000e_reinit_locked(adapter); | |
else | |
@@ -5985,8 +6535,7 @@ | |
* The OS initialization, configuring of the adapter private structure, | |
* and a hardware reset occur. | |
**/ | |
-static int __devinit e1000_probe(struct pci_dev *pdev, | |
- const struct pci_device_id *ent) | |
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
{ | |
struct net_device *netdev; | |
struct e1000_adapter *adapter; | |
@@ -5994,10 +6543,9 @@ | |
const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; | |
resource_size_t mmio_start, mmio_len; | |
resource_size_t flash_start, flash_len; | |
- | |
static int cards_found; | |
u16 aspm_disable_flag = 0; | |
- int i, err, pci_using_dac; | |
+ int bars, i, err, pci_using_dac; | |
u16 eeprom_data = 0; | |
u16 eeprom_apme_mask = E1000_EEPROM_APME; | |
@@ -6013,26 +6561,21 @@ | |
return err; | |
pci_using_dac = 0; | |
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | |
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
if (!err) { | |
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); | |
- if (!err) | |
- pci_using_dac = 1; | |
+ pci_using_dac = 1; | |
} else { | |
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | |
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
if (err) { | |
- err = dma_set_coherent_mask(&pdev->dev, | |
- DMA_BIT_MASK(32)); | |
- if (err) { | |
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); | |
- goto err_dma; | |
- } | |
+ dev_err(&pdev->dev, | |
+ "No usable DMA configuration, aborting\n"); | |
+ goto err_dma; | |
} | |
} | |
- err = pci_request_selected_regions_exclusive(pdev, | |
- pci_select_bars(pdev, IORESOURCE_MEM), | |
- e1000e_driver_name); | |
+ bars = pci_select_bars(pdev, IORESOURCE_MEM); | |
+ err = pci_request_selected_regions_exclusive(pdev, bars, | |
+ e1000e_driver_name); | |
if (err) | |
goto err_pci_reg; | |
@@ -6066,7 +6609,7 @@ | |
adapter->hw.adapter = adapter; | |
adapter->hw.mac.type = ei->mac; | |
adapter->max_hw_frame_size = ei->max_hw_frame_size; | |
- adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; | |
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); | |
mmio_start = pci_resource_start(pdev, 0); | |
mmio_len = pci_resource_len(pdev, 0); | |
@@ -6085,12 +6628,16 @@ | |
goto err_flashmap; | |
} | |
+ /* Set default EEE advertisement */ | |
+ if (adapter->flags2 & FLAG2_HAS_EEE) | |
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; | |
+ | |
/* construct the net_device struct */ | |
- netdev->netdev_ops = &e1000e_netdev_ops; | |
+ netdev->netdev_ops = &e1000e_netdev_ops; | |
e1000e_set_ethtool_ops(netdev); | |
- netdev->watchdog_timeo = 5 * HZ; | |
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); | |
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | |
+ netdev->watchdog_timeo = 5 * HZ; | |
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); | |
+ strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); | |
netdev->mem_start = mmio_start; | |
netdev->mem_end = mmio_start + mmio_len; | |
@@ -6127,23 +6674,28 @@ | |
adapter->hw.phy.ms_type = e1000_ms_hw_default; | |
} | |
- if (e1000_check_reset_block(&adapter->hw)) | |
- e_info("PHY reset is blocked due to SOL/IDER session.\n"); | |
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) | |
+ dev_info(&pdev->dev, | |
+ "PHY reset is blocked due to SOL/IDER session.\n"); | |
/* Set initial default active device features */ | |
netdev->features = (NETIF_F_SG | | |
- NETIF_F_HW_VLAN_RX | | |
- NETIF_F_HW_VLAN_TX | | |
+ NETIF_F_HW_VLAN_CTAG_RX | | |
+ NETIF_F_HW_VLAN_CTAG_TX | | |
NETIF_F_TSO | | |
NETIF_F_TSO6 | | |
+ NETIF_F_RXHASH | | |
NETIF_F_RXCSUM | | |
NETIF_F_HW_CSUM); | |
/* Set user-changeable features (subset of all device features) */ | |
netdev->hw_features = netdev->features; | |
+ netdev->hw_features |= NETIF_F_RXFCS; | |
+ netdev->priv_flags |= IFF_SUPP_NOFCS; | |
+ netdev->hw_features |= NETIF_F_RXALL; | |
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) | |
- netdev->features |= NETIF_F_HW_VLAN_FILTER; | |
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | |
netdev->vlan_features |= (NETIF_F_SG | | |
NETIF_F_TSO | | |
@@ -6160,21 +6712,19 @@ | |
if (e1000e_enable_mng_pass_thru(&adapter->hw)) | |
adapter->flags |= FLAG_MNG_PT_ENABLED; | |
- /* | |
- * before reading the NVM, reset the controller to | |
+ /* before reading the NVM, reset the controller to | |
* put the device in a known good starting state | |
*/ | |
adapter->hw.mac.ops.reset_hw(&adapter->hw); | |
- /* | |
- * systems with ASPM and others may see the checksum fail on the first | |
+ /* systems with ASPM and others may see the checksum fail on the first | |
* attempt. Let's give it a few tries | |
*/ | |
for (i = 0;; i++) { | |
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) | |
break; | |
if (i == 2) { | |
- e_err("The NVM Checksum Is Not Valid\n"); | |
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); | |
err = -EIO; | |
goto err_eeprom; | |
} | |
@@ -6184,24 +6734,25 @@ | |
/* copy the MAC address */ | |
if (e1000e_read_mac_addr(&adapter->hw)) | |
- e_err("NVM Read Error while reading MAC address\n"); | |
+ dev_err(&pdev->dev, | |
+ "NVM Read Error while reading MAC address\n"); | |
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); | |
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); | |
- if (!is_valid_ether_addr(netdev->perm_addr)) { | |
- e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); | |
+ if (!is_valid_ether_addr(netdev->dev_addr)) { | |
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", | |
+ netdev->dev_addr); | |
err = -EIO; | |
goto err_eeprom; | |
} | |
init_timer(&adapter->watchdog_timer); | |
adapter->watchdog_timer.function = e1000_watchdog; | |
- adapter->watchdog_timer.data = (unsigned long) adapter; | |
+ adapter->watchdog_timer.data = (unsigned long)adapter; | |
init_timer(&adapter->phy_info_timer); | |
adapter->phy_info_timer.function = e1000_update_phy_info; | |
- adapter->phy_info_timer.data = (unsigned long) adapter; | |
+ adapter->phy_info_timer.data = (unsigned long)adapter; | |
INIT_WORK(&adapter->reset_task, e1000_reset_task); | |
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | |
@@ -6216,12 +6767,7 @@ | |
adapter->hw.fc.current_mode = e1000_fc_default; | |
adapter->hw.phy.autoneg_advertised = 0x2f; | |
- /* ring size defaults */ | |
- adapter->rx_ring->count = 256; | |
- adapter->tx_ring->count = 256; | |
- | |
- /* | |
- * Initial Wake on LAN setting - If APM wake is enabled in | |
+ /* Initial Wake on LAN setting - If APM wake is enabled in | |
* the EEPROM, enable the ACPI Magic Packet filter | |
*/ | |
if (adapter->flags & FLAG_APME_IN_WUC) { | |
@@ -6234,19 +6780,18 @@ | |
} else if (adapter->flags & FLAG_APME_IN_CTRL3) { | |
if (adapter->flags & FLAG_APME_CHECK_PORT_B && | |
(adapter->hw.bus.func == 1)) | |
- e1000_read_nvm(&adapter->hw, | |
- NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | |
+ e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, | |
+ 1, &eeprom_data); | |
else | |
- e1000_read_nvm(&adapter->hw, | |
- NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | |
+ e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, | |
+ 1, &eeprom_data); | |
} | |
/* fetch WoL from EEPROM */ | |
if (eeprom_data & eeprom_apme_mask) | |
adapter->eeprom_wol |= E1000_WUFC_MAG; | |
- /* | |
- * now that we have the eeprom settings, apply the special cases | |
+ /* now that we have the eeprom settings, apply the special cases | |
* where the eeprom may be wrong or the board simply won't support | |
* wake on lan on a particular port | |
*/ | |
@@ -6255,7 +6800,11 @@ | |
/* initialize the wol settings based on the eeprom settings */ | |
adapter->wol = adapter->eeprom_wol; | |
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | |
+ | |
+ /* make sure adapter isn't asleep if manageability is enabled */ | |
+ if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || | |
+ (hw->mac.ops.check_mng_mode(hw))) | |
+ device_wakeup_enable(&pdev->dev); | |
/* save off EEPROM version number */ | |
e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); | |
@@ -6263,15 +6812,14 @@ | |
/* reset the hardware with the new settings */ | |
e1000e_reset(adapter); | |
- /* | |
- * If the controller has AMT, do not set DRV_LOAD until the interface | |
+ /* If the controller has AMT, do not set DRV_LOAD until the interface | |
* is up. For all other cases, let the f/w know that the h/w is now | |
* under the control of the driver. | |
*/ | |
if (!(adapter->flags & FLAG_HAS_AMT)) | |
e1000e_get_hw_control(adapter); | |
- strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); | |
+ strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); | |
err = register_netdev(netdev); | |
if (err) | |
goto err_register; | |
@@ -6279,6 +6827,9 @@ | |
/* carrier off reporting is important to ethtool even BEFORE open */ | |
netif_carrier_off(netdev); | |
+ /* init PTP hardware clock */ | |
+ e1000e_ptp_init(adapter); | |
+ | |
e1000_print_device_info(adapter); | |
if (pci_dev_run_wake(pdev)) | |
@@ -6290,7 +6841,7 @@ | |
if (!(adapter->flags & FLAG_HAS_AMT)) | |
e1000e_release_hw_control(adapter); | |
err_eeprom: | |
- if (!e1000_check_reset_block(&adapter->hw)) | |
+ if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) | |
e1000_phy_hw_reset(&adapter->hw); | |
err_hw_init: | |
kfree(adapter->tx_ring); | |
@@ -6305,7 +6856,7 @@ | |
free_netdev(netdev); | |
err_alloc_etherdev: | |
pci_release_selected_regions(pdev, | |
- pci_select_bars(pdev, IORESOURCE_MEM)); | |
+ pci_select_bars(pdev, IORESOURCE_MEM)); | |
err_pci_reg: | |
err_dma: | |
pci_disable_device(pdev); | |
@@ -6321,14 +6872,15 @@ | |
* Hot-Plug event, or because the driver is going to be removed from | |
* memory. | |
**/ | |
-static void __devexit e1000_remove(struct pci_dev *pdev) | |
+static void e1000_remove(struct pci_dev *pdev) | |
{ | |
struct net_device *netdev = pci_get_drvdata(pdev); | |
struct e1000_adapter *adapter = netdev_priv(netdev); | |
bool down = test_bit(__E1000_DOWN, &adapter->state); | |
- /* | |
- * The timers may be rescheduled, so explicitly disable them | |
+ e1000e_ptp_remove(adapter); | |
+ | |
+ /* The timers may be rescheduled, so explicitly disable them | |
* from being rescheduled. | |
*/ | |
if (!down) | |
@@ -6342,6 +6894,14 @@ | |
cancel_work_sync(&adapter->update_phy_task); | |
cancel_work_sync(&adapter->print_hang_task); | |
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { | |
+ cancel_work_sync(&adapter->tx_hwtstamp_work); | |
+ if (adapter->tx_hwtstamp_skb) { | |
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb); | |
+ adapter->tx_hwtstamp_skb = NULL; | |
+ } | |
+ } | |
+ | |
if (!(netdev->flags & IFF_UP)) | |
e1000_power_down_phy(adapter); | |
@@ -6353,8 +6913,7 @@ | |
if (pci_dev_run_wake(pdev)) | |
pm_runtime_get_noresume(&pdev->dev); | |
- /* | |
- * Release control of h/w to f/w. If f/w is AMT enabled, this | |
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this | |
* would have already happened in close and is redundant. | |
*/ | |
e1000e_release_hw_control(adapter); | |
@@ -6367,7 +6926,7 @@ | |
if (adapter->hw.flash_address) | |
iounmap(adapter->hw.flash_address); | |
pci_release_selected_regions(pdev, | |
- pci_select_bars(pdev, IORESOURCE_MEM)); | |
+ pci_select_bars(pdev, IORESOURCE_MEM)); | |
free_netdev(netdev); | |
@@ -6378,7 +6937,7 @@ | |
} | |
/* PCI Error Recovery (ERS) */ | |
-static struct pci_error_handlers e1000_err_handler = { | |
+static const struct pci_error_handlers e1000_err_handler = { | |
.error_detected = e1000_io_error_detected, | |
.slot_reset = e1000_io_slot_reset, | |
.resume = e1000_io_resume, | |
@@ -6388,7 +6947,8 @@ | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, | |
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), | |
+ board_82571 }, | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, | |
@@ -6452,27 +7012,34 @@ | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, | |
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, | |
- { } /* terminate list */ | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, | |
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, | |
+ | |
+ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ | |
}; | |
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | |
-#ifdef CONFIG_PM | |
static const struct dev_pm_ops e1000_pm_ops = { | |
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) | |
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend, | |
- e1000_runtime_resume, e1000_idle) | |
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, | |
+ e1000_idle) | |
}; | |
-#endif | |
/* PCI Device API Driver */ | |
static struct pci_driver e1000_driver = { | |
.name = e1000e_driver_name, | |
.id_table = e1000_pci_tbl, | |
.probe = e1000_probe, | |
- .remove = __devexit_p(e1000_remove), | |
-#ifdef CONFIG_PM | |
- .driver.pm = &e1000_pm_ops, | |
-#endif | |
+ .remove = e1000_remove, | |
+ .driver = { | |
+ .pm = &e1000_pm_ops, | |
+ }, | |
.shutdown = e1000_shutdown, | |
.err_handler = &e1000_err_handler | |
}; | |
@@ -6488,7 +7055,7 @@ | |
int ret; | |
pr_info("Intel(R) PRO/1000 Network Driver - %s\n", | |
e1000e_driver_version); | |
- pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); | |
+ pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n"); | |
ret = pci_register_driver(&e1000_driver); | |
return ret; | |
@@ -6507,10 +7074,9 @@ | |
} | |
module_exit(e1000_exit_module); | |
- | |
MODULE_AUTHOR("Intel Corporation, <[email protected]>"); | |
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); | |
MODULE_LICENSE("GPL"); | |
MODULE_VERSION(DRV_VERSION); | |
-/* e1000_main.c */ | |
+/* netdev.c */ | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: nvm.c | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: nvm.h | |
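
Note: the netdev.c hunks above add SIOCSHWTSTAMP handling and the newer SIOCGHWTSTAMP ioctl to e1000_ioctl(), which the VyOS 3.8 tree lacks. A minimal userspace sketch of driving these ioctls is shown below; the interface name, the filter choice, and the error handling are illustrative assumptions, not anything taken from the diff.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	/* Request hardware TX timestamping and a PTPv2 L4 Sync RX filter. */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */
	ifr.ifr_data = (char *)&cfg;

	/* SIOCSHWTSTAMP: the driver may widen the filter it reports back
	 * (e.g. to HWTSTAMP_FILTER_SOME), as the comment in the hunk explains. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	/* SIOCGHWTSTAMP: read back the configuration currently in effect. */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}
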
diff -ru e1000e/param.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/param.c | |
--- e1000e/param.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/param.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -32,11 +32,9 @@ | |
#include "e1000.h" | |
-/* | |
- * This is the only thing that needs to be changed to adjust the | |
+/* This is the only thing that needs to be changed to adjust the | |
* maximum number of ports that the driver can manage. | |
*/ | |
- | |
#define E1000_MAX_NIC 32 | |
#define OPTION_UNSET -1 | |
@@ -47,24 +45,20 @@ | |
unsigned int copybreak = COPYBREAK_DEFAULT; | |
module_param(copybreak, uint, 0644); | |
MODULE_PARM_DESC(copybreak, | |
- "Maximum size of packet that is copied to a new buffer on receive"); | |
+ "Maximum size of packet that is copied to a new buffer on receive"); | |
-/* | |
- * All parameters are treated the same, as an integer array of values. | |
+/* All parameters are treated the same, as an integer array of values. | |
* This macro just reduces the need to repeat the same declaration code | |
* over and over (plus this helps to avoid typo bugs). | |
*/ | |
- | |
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } | |
#define E1000_PARAM(X, desc) \ | |
- static int __devinitdata X[E1000_MAX_NIC+1] \ | |
- = E1000_PARAM_INIT; \ | |
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ | |
static unsigned int num_##X; \ | |
module_param_array_named(X, X, int, &num_##X, 0); \ | |
MODULE_PARM_DESC(X, desc); | |
-/* | |
- * Transmit Interrupt Delay in units of 1.024 microseconds | |
+/* Transmit Interrupt Delay in units of 1.024 microseconds | |
* Tx interrupt delay needs to typically be set to something non-zero | |
* | |
* Valid Range: 0-65535 | |
@@ -74,8 +68,7 @@ | |
#define MAX_TXDELAY 0xFFFF | |
#define MIN_TXDELAY 0 | |
-/* | |
- * Transmit Absolute Interrupt Delay in units of 1.024 microseconds | |
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds | |
* | |
* Valid Range: 0-65535 | |
*/ | |
@@ -84,8 +77,7 @@ | |
#define MAX_TXABSDELAY 0xFFFF | |
#define MIN_TXABSDELAY 0 | |
-/* | |
- * Receive Interrupt Delay in units of 1.024 microseconds | |
+/* Receive Interrupt Delay in units of 1.024 microseconds | |
* hardware will likely hang if you set this to anything but zero. | |
* | |
* Valid Range: 0-65535 | |
@@ -94,8 +86,7 @@ | |
#define MAX_RXDELAY 0xFFFF | |
#define MIN_RXDELAY 0 | |
-/* | |
- * Receive Absolute Interrupt Delay in units of 1.024 microseconds | |
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds | |
* | |
* Valid Range: 0-65535 | |
*/ | |
@@ -103,10 +94,9 @@ | |
#define MAX_RXABSDELAY 0xFFFF | |
#define MIN_RXABSDELAY 0 | |
-/* | |
- * Interrupt Throttle Rate (interrupts/sec) | |
+/* Interrupt Throttle Rate (interrupts/sec) | |
* | |
- * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) | |
+ * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative | |
*/ | |
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | |
#define DEFAULT_ITR 3 | |
@@ -115,16 +105,23 @@ | |
/* IntMode (Interrupt Mode) | |
* | |
- * Valid Range: 0 - 2 | |
+ * Valid Range: varies depending on kernel configuration & hardware support | |
+ * | |
+ * legacy=0, MSI=1, MSI-X=2 | |
+ * | |
+ * When MSI/MSI-X support is enabled in kernel- | |
+ * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise | |
+ * When MSI/MSI-X support is not enabled in kernel- | |
+ * Default Value: 0 (legacy) | |
* | |
- * Default Value: 2 (MSI-X) | |
+ * When a mode is specified that is not allowed/supported, it will be | |
+ * demoted to the most advanced interrupt mode available. | |
*/ | |
E1000_PARAM(IntMode, "Interrupt Mode"); | |
#define MAX_INTMODE 2 | |
#define MIN_INTMODE 0 | |
-/* | |
- * Enable Smart Power Down of the PHY | |
+/* Enable Smart Power Down of the PHY | |
* | |
* Valid Range: 0, 1 | |
* | |
@@ -132,8 +129,7 @@ | |
*/ | |
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | |
-/* | |
- * Enable Kumeran Lock Loss workaround | |
+/* Enable Kumeran Lock Loss workaround | |
* | |
* Valid Range: 0, 1 | |
* | |
@@ -141,24 +137,23 @@ | |
*/ | |
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); | |
-/* | |
- * Write Protect NVM | |
+/* Write Protect NVM | |
* | |
* Valid Range: 0, 1 | |
* | |
* Default Value: 1 (enabled) | |
*/ | |
-E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); | |
+E1000_PARAM(WriteProtectNVM, | |
+ "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); | |
-/* | |
- * Enable CRC Stripping | |
+/* Enable CRC Stripping | |
* | |
* Valid Range: 0, 1 | |
* | |
* Default Value: 1 (enabled) | |
*/ | |
-E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ | |
- "the CRC"); | |
+E1000_PARAM(CrcStripping, | |
+ "Enable CRC Stripping, disable if your BMC needs the CRC"); | |
struct e1000_option { | |
enum { enable_option, range_option, list_option } type; | |
@@ -166,20 +161,25 @@ | |
const char *err; | |
int def; | |
union { | |
- struct { /* range_option info */ | |
+ /* range_option info */ | |
+ struct { | |
int min; | |
int max; | |
} r; | |
- struct { /* list_option info */ | |
+ /* list_option info */ | |
+ struct { | |
int nr; | |
- struct e1000_opt_list { int i; char *str; } *p; | |
+ struct e1000_opt_list { | |
+ int i; | |
+ char *str; | |
+ } *p; | |
} l; | |
} arg; | |
}; | |
-static int __devinit e1000_validate_option(unsigned int *value, | |
- const struct e1000_option *opt, | |
- struct e1000_adapter *adapter) | |
+static int e1000_validate_option(unsigned int *value, | |
+ const struct e1000_option *opt, | |
+ struct e1000_adapter *adapter) | |
{ | |
if (*value == OPTION_UNSET) { | |
*value = opt->def; | |
@@ -190,16 +190,19 @@ | |
case enable_option: | |
switch (*value) { | |
case OPTION_ENABLED: | |
- e_info("%s Enabled\n", opt->name); | |
+ dev_info(&adapter->pdev->dev, "%s Enabled\n", | |
+ opt->name); | |
return 0; | |
case OPTION_DISABLED: | |
- e_info("%s Disabled\n", opt->name); | |
+ dev_info(&adapter->pdev->dev, "%s Disabled\n", | |
+ opt->name); | |
return 0; | |
} | |
break; | |
case range_option: | |
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | |
- e_info("%s set to %i\n", opt->name, *value); | |
+ dev_info(&adapter->pdev->dev, "%s set to %i\n", | |
+ opt->name, *value); | |
return 0; | |
} | |
break; | |
@@ -211,7 +214,8 @@ | |
ent = &opt->arg.l.p[i]; | |
if (*value == ent->i) { | |
if (ent->str[0] != '\0') | |
- e_info("%s\n", ent->str); | |
+ dev_info(&adapter->pdev->dev, "%s\n", | |
+ ent->str); | |
return 0; | |
} | |
} | |
@@ -221,8 +225,8 @@ | |
BUG(); | |
} | |
- e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, | |
- opt->err); | |
+ dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n", | |
+ opt->name, *value, opt->err); | |
*value = opt->def; | |
return -1; | |
} | |
@@ -236,17 +240,20 @@ | |
* value exists, a default value is used. The final value is stored | |
* in a variable in the adapter structure. | |
**/ | |
-void __devinit e1000e_check_options(struct e1000_adapter *adapter) | |
+void e1000e_check_options(struct e1000_adapter *adapter) | |
{ | |
struct e1000_hw *hw = &adapter->hw; | |
int bd = adapter->bd_number; | |
if (bd >= E1000_MAX_NIC) { | |
- e_notice("Warning: no configuration for board #%i\n", bd); | |
- e_notice("Using defaults for all values\n"); | |
+ dev_notice(&adapter->pdev->dev, | |
+ "Warning: no configuration for board #%i\n", bd); | |
+ dev_notice(&adapter->pdev->dev, | |
+ "Using defaults for all values\n"); | |
} | |
- { /* Transmit Interrupt Delay */ | |
+ /* Transmit Interrupt Delay */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = range_option, | |
.name = "Transmit Interrupt Delay", | |
@@ -265,7 +272,8 @@ | |
adapter->tx_int_delay = opt.def; | |
} | |
} | |
- { /* Transmit Absolute Interrupt Delay */ | |
+ /* Transmit Absolute Interrupt Delay */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = range_option, | |
.name = "Transmit Absolute Interrupt Delay", | |
@@ -284,7 +292,8 @@ | |
adapter->tx_abs_int_delay = opt.def; | |
} | |
} | |
- { /* Receive Interrupt Delay */ | |
+ /* Receive Interrupt Delay */ | |
+ { | |
static struct e1000_option opt = { | |
.type = range_option, | |
.name = "Receive Interrupt Delay", | |
@@ -303,7 +312,8 @@ | |
adapter->rx_int_delay = opt.def; | |
} | |
} | |
- { /* Receive Absolute Interrupt Delay */ | |
+ /* Receive Absolute Interrupt Delay */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = range_option, | |
.name = "Receive Absolute Interrupt Delay", | |
@@ -322,7 +332,8 @@ | |
adapter->rx_abs_int_delay = opt.def; | |
} | |
} | |
- { /* Interrupt Throttling Rate */ | |
+ /* Interrupt Throttling Rate */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = range_option, | |
.name = "Interrupt Throttling Rate (ints/sec)", | |
@@ -335,65 +346,95 @@ | |
if (num_InterruptThrottleRate > bd) { | |
adapter->itr = InterruptThrottleRate[bd]; | |
- switch (adapter->itr) { | |
- case 0: | |
- e_info("%s turned off\n", opt.name); | |
- break; | |
- case 1: | |
- e_info("%s set to dynamic mode\n", opt.name); | |
- adapter->itr_setting = adapter->itr; | |
- adapter->itr = 20000; | |
- break; | |
- case 3: | |
- e_info("%s set to dynamic conservative mode\n", | |
- opt.name); | |
- adapter->itr_setting = adapter->itr; | |
- adapter->itr = 20000; | |
- break; | |
- case 4: | |
- e_info("%s set to simplified (2000-8000 ints) " | |
- "mode\n", opt.name); | |
- adapter->itr_setting = 4; | |
- break; | |
- default: | |
- /* | |
- * Save the setting, because the dynamic bits | |
- * change itr. | |
- */ | |
- if (e1000_validate_option(&adapter->itr, &opt, | |
- adapter) && | |
- (adapter->itr == 3)) { | |
- /* | |
- * In case of invalid user value, | |
- * default to conservative mode. | |
- */ | |
- adapter->itr_setting = adapter->itr; | |
- adapter->itr = 20000; | |
- } else { | |
- /* | |
- * Clear the lower two bits because | |
- * they are used as control. | |
- */ | |
- adapter->itr_setting = | |
- adapter->itr & ~3; | |
- } | |
- break; | |
- } | |
+ | |
+ /* Make sure a message is printed for non-special | |
+ * values. And in case of an invalid option, display | |
+ * warning, use default and go through itr/itr_setting | |
+ * adjustment logic below | |
+ */ | |
+ if ((adapter->itr > 4) && | |
+ e1000_validate_option(&adapter->itr, &opt, adapter)) | |
+ adapter->itr = opt.def; | |
} else { | |
- adapter->itr_setting = opt.def; | |
+ /* If no option specified, use default value and go | |
+ * through the logic below to adjust itr/itr_setting | |
+ */ | |
+ adapter->itr = opt.def; | |
+ | |
+ /* Make sure a message is printed for non-special | |
+ * default values | |
+ */ | |
+ if (adapter->itr > 4) | |
+ dev_info(&adapter->pdev->dev, | |
+ "%s set to default %d\n", opt.name, | |
+ adapter->itr); | |
+ } | |
+ | |
+ adapter->itr_setting = adapter->itr; | |
+ switch (adapter->itr) { | |
+ case 0: | |
+ dev_info(&adapter->pdev->dev, "%s turned off\n", | |
+ opt.name); | |
+ break; | |
+ case 1: | |
+ dev_info(&adapter->pdev->dev, | |
+ "%s set to dynamic mode\n", opt.name); | |
adapter->itr = 20000; | |
+ break; | |
+ case 3: | |
+ dev_info(&adapter->pdev->dev, | |
+ "%s set to dynamic conservative mode\n", | |
+ opt.name); | |
+ adapter->itr = 20000; | |
+ break; | |
+ case 4: | |
+ dev_info(&adapter->pdev->dev, | |
+ "%s set to simplified (2000-8000 ints) mode\n", | |
+ opt.name); | |
+ break; | |
+ default: | |
+ /* Save the setting, because the dynamic bits | |
+ * change itr. | |
+ * | |
+ * Clear the lower two bits because | |
+ * they are used as control. | |
+ */ | |
+ adapter->itr_setting &= ~3; | |
+ break; | |
} | |
} | |
- { /* Interrupt Mode */ | |
+ /* Interrupt Mode */ | |
+ { | |
static struct e1000_option opt = { | |
.type = range_option, | |
.name = "Interrupt Mode", | |
- .err = "defaulting to 2 (MSI-X)", | |
- .def = E1000E_INT_MODE_MSIX, | |
- .arg = { .r = { .min = MIN_INTMODE, | |
- .max = MAX_INTMODE } } | |
+#ifndef CONFIG_PCI_MSI | |
+ .err = "defaulting to 0 (legacy)", | |
+ .def = E1000E_INT_MODE_LEGACY, | |
+ .arg = { .r = { .min = 0, | |
+ .max = 0 } } | |
+#endif | |
}; | |
+#ifdef CONFIG_PCI_MSI | |
+ if (adapter->flags & FLAG_HAS_MSIX) { | |
+ opt.err = kstrdup("defaulting to 2 (MSI-X)", | |
+ GFP_KERNEL); | |
+ opt.def = E1000E_INT_MODE_MSIX; | |
+ opt.arg.r.max = E1000E_INT_MODE_MSIX; | |
+ } else { | |
+ opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL); | |
+ opt.def = E1000E_INT_MODE_MSI; | |
+ opt.arg.r.max = E1000E_INT_MODE_MSI; | |
+ } | |
+ | |
+ if (!opt.err) { | |
+ dev_err(&adapter->pdev->dev, | |
+ "Failed to allocate memory\n"); | |
+ return; | |
+ } | |
+#endif | |
+ | |
if (num_IntMode > bd) { | |
unsigned int int_mode = IntMode[bd]; | |
e1000_validate_option(&int_mode, &opt, adapter); | |
@@ -401,8 +442,13 @@ | |
} else { | |
adapter->int_mode = opt.def; | |
} | |
+ | |
+#ifdef CONFIG_PCI_MSI | |
+ kfree(opt.err); | |
+#endif | |
} | |
- { /* Smart Power Down */ | |
+ /* Smart Power Down */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = enable_option, | |
.name = "PHY Smart Power Down", | |
@@ -413,12 +459,12 @@ | |
if (num_SmartPowerDownEnable > bd) { | |
unsigned int spd = SmartPowerDownEnable[bd]; | |
e1000_validate_option(&spd, &opt, adapter); | |
- if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) | |
- && spd) | |
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) | |
adapter->flags |= FLAG_SMART_POWER_DOWN; | |
} | |
} | |
- { /* CRC Stripping */ | |
+ /* CRC Stripping */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = enable_option, | |
.name = "CRC Stripping", | |
@@ -429,33 +475,37 @@ | |
if (num_CrcStripping > bd) { | |
unsigned int crc_stripping = CrcStripping[bd]; | |
e1000_validate_option(&crc_stripping, &opt, adapter); | |
- if (crc_stripping == OPTION_ENABLED) | |
+ if (crc_stripping == OPTION_ENABLED) { | |
adapter->flags2 |= FLAG2_CRC_STRIPPING; | |
+ adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; | |
+ } | |
} else { | |
adapter->flags2 |= FLAG2_CRC_STRIPPING; | |
+ adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; | |
} | |
} | |
- { /* Kumeran Lock Loss Workaround */ | |
+ /* Kumeran Lock Loss Workaround */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = enable_option, | |
.name = "Kumeran Lock Loss Workaround", | |
.err = "defaulting to Enabled", | |
.def = OPTION_ENABLED | |
}; | |
+ bool enabled = opt.def; | |
if (num_KumeranLockLoss > bd) { | |
unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; | |
e1000_validate_option(&kmrn_lock_loss, &opt, adapter); | |
- if (hw->mac.type == e1000_ich8lan) | |
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, | |
- kmrn_lock_loss); | |
- } else { | |
- if (hw->mac.type == e1000_ich8lan) | |
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, | |
- opt.def); | |
+ enabled = kmrn_lock_loss; | |
} | |
+ | |
+ if (hw->mac.type == e1000_ich8lan) | |
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, | |
+ enabled); | |
} | |
- { /* Write-protect NVM */ | |
+ /* Write-protect NVM */ | |
+ { | |
static const struct e1000_option opt = { | |
.type = enable_option, | |
.name = "Write-protect NVM", | |
@@ -465,7 +515,8 @@ | |
if (adapter->flags & FLAG_IS_ICH) { | |
if (num_WriteProtectNVM > bd) { | |
- unsigned int write_protect_nvm = WriteProtectNVM[bd]; | |
+ unsigned int write_protect_nvm = | |
+ WriteProtectNVM[bd]; | |
e1000_validate_option(&write_protect_nvm, &opt, | |
adapter); | |
if (write_protect_nvm) | |
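
Note: the param.c hunks above drop __devinit/__devinitdata, route option messages through dev_info(), and choose the IntMode default from MSI/MSI-X support, while keeping the same enable/range/list option-table shape. Below is a stand-alone sketch of the range-validation pattern that e1000_validate_option() implements for range options; it is a simplified illustration, and the default value used here is an assumption (only the 0-65535 range appears in the diff).

#include <stdio.h>

/* Simplified stand-in for the range_option case of struct e1000_option. */
struct range_opt {
	const char *name;
	const char *err;	/* message printed when the value is rejected */
	int def;		/* default applied on invalid input */
	int min, max;
};

/* Mirrors the shape of e1000_validate_option() for range options:
 * accept in-range values, otherwise warn and fall back to the default. */
static int validate_range(unsigned int *value, const struct range_opt *opt)
{
	if (*value >= (unsigned int)opt->min && *value <= (unsigned int)opt->max) {
		printf("%s set to %u\n", opt->name, *value);
		return 0;
	}
	printf("Invalid %s value specified (%u) %s\n",
	       opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
}

int main(void)
{
	/* Range matches the Transmit Interrupt Delay option in the diff;
	 * the default and error string here are assumptions. */
	struct range_opt tx_int_delay = {
		.name = "Transmit Interrupt Delay",
		.err  = "using default value",
		.def  = 8,
		.min  = 0,
		.max  = 0xFFFF,
	};
	unsigned int value = 70000;	/* deliberately out of range */

	validate_range(&value, &tx_int_delay);
	printf("final value: %u\n", value);
	return 0;
}
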
diff -ru e1000e/phy.c /home/arch/linux/drivers/net/ethernet/intel/e1000e/phy.c | |
--- e1000e/phy.c 2014-05-26 11:09:47.000000000 +0900 | |
+++ /home/arch/linux/drivers/net/ethernet/intel/e1000e/phy.c 2014-05-26 08:36:41.000000000 +0900 | |
@@ -1,7 +1,7 @@ | |
/******************************************************************************* | |
Intel PRO/1000 Linux driver | |
- Copyright(c) 1999 - 2011 Intel Corporation. | |
+ Copyright(c) 1999 - 2013 Intel Corporation. | |
This program is free software; you can redistribute it and/or modify it | |
under the terms and conditions of the GNU General Public License, | |
@@ -26,24 +26,20 @@ | |
*******************************************************************************/ | |
-#include <linux/delay.h> | |
- | |
#include "e1000.h" | |
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); | |
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); | |
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); | |
static s32 e1000_wait_autoneg(struct e1000_hw *hw); | |
-static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); | |
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |
u16 *data, bool read, bool page_set); | |
static u32 e1000_get_phy_addr_for_hv_page(u32 page); | |
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |
- u16 *data, bool read); | |
+ u16 *data, bool read); | |
/* Cable length tables */ | |
static const u16 e1000_m88_cable_length_table[] = { | |
- 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; | |
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED | |
+}; | |
+ | |
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ | |
ARRAY_SIZE(e1000_m88_cable_length_table) | |
@@ -55,51 +51,12 @@ | |
66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, | |
87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, | |
100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, | |
- 124}; | |
+ 124 | |
+}; | |
+ | |
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ | |
ARRAY_SIZE(e1000_igp_2_cable_length_table) | |
-#define BM_PHY_REG_PAGE(offset) \ | |
- ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) | |
-#define BM_PHY_REG_NUM(offset) \ | |
- ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ | |
- (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ | |
- ~MAX_PHY_REG_ADDRESS))) | |
- | |
-#define HV_INTC_FC_PAGE_START 768 | |
-#define I82578_ADDR_REG 29 | |
-#define I82577_ADDR_REG 16 | |
-#define I82577_CFG_REG 22 | |
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) | |
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ | |
-#define I82577_CTRL_REG 23 | |
- | |
-/* 82577 specific PHY registers */ | |
-#define I82577_PHY_CTRL_2 18 | |
-#define I82577_PHY_STATUS_2 26 | |
-#define I82577_PHY_DIAG_STATUS 31 | |
- | |
-/* I82577 PHY Status 2 */ | |
-#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 | |
-#define I82577_PHY_STATUS2_MDIX 0x0800 | |
-#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 | |
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 | |
- | |
-/* I82577 PHY Control 2 */ | |
-#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 | |
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 | |
- | |
-/* I82577 PHY Diagnostics Status */ | |
-#define I82577_DSTATUS_CABLE_LENGTH 0x03FC | |
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 | |
- | |
-/* BM PHY Copper Specific Control 1 */ | |
-#define BM_CS_CTRL1 16 | |
- | |
-#define HV_MUX_DATA_CTRL PHY_REG(776, 16) | |
-#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 | |
-#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 | |
- | |
/** | |
* e1000e_check_reset_block_generic - Check if PHY reset is blocked | |
* @hw: pointer to the HW structure | |
@@ -114,8 +71,7 @@ | |
manc = er32(MANC); | |
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? | |
- E1000_BLK_PHY_RESET : 0; | |
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; | |
} | |
/** | |
@@ -132,30 +88,30 @@ | |
u16 phy_id; | |
u16 retry_count = 0; | |
- if (!(phy->ops.read_reg)) | |
- goto out; | |
+ if (!phy->ops.read_reg) | |
+ return 0; | |
while (retry_count < 2) { | |
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); | |
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
phy->id = (u32)(phy_id << 16); | |
- udelay(20); | |
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); | |
+ usleep_range(20, 40); | |
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
phy->id |= (u32)(phy_id & PHY_REVISION_MASK); | |
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); | |
if (phy->id != 0 && phy->id != PHY_REVISION_MASK) | |
- goto out; | |
+ return 0; | |
retry_count++; | |
} | |
-out: | |
- return ret_val; | |
+ | |
+ return 0; | |
} | |
/** | |
@@ -194,8 +150,7 @@ | |
return -E1000_ERR_PARAM; | |
} | |
- /* | |
- * Set up Op-code, Phy Address, and register offset in the MDI | |
+ /* Set up Op-code, Phy Address, and register offset in the MDI | |
* Control register. The MAC will take care of interfacing with the | |
* PHY to retrieve the desired data. | |
*/ | |
@@ -205,8 +160,7 @@ | |
ew32(MDIC, mdic); | |
- /* | |
- * Poll the ready bit to see if the MDI read completed | |
+ /* Poll the ready bit to see if the MDI read completed | |
* Increasing the time out as testing showed failures with | |
* the lower time out | |
*/ | |
@@ -224,10 +178,15 @@ | |
e_dbg("MDI Error\n"); | |
return -E1000_ERR_PHY; | |
} | |
- *data = (u16) mdic; | |
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { | |
+ e_dbg("MDI Read offset error - requested %d, returned %d\n", | |
+ offset, | |
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); | |
+ return -E1000_ERR_PHY; | |
+ } | |
+ *data = (u16)mdic; | |
- /* | |
- * Allow some time after each MDIC transaction to avoid | |
+ /* Allow some time after each MDIC transaction to avoid | |
* reading duplicate data in the next MDIC transaction. | |
*/ | |
if (hw->mac.type == e1000_pch2lan) | |
@@ -254,8 +213,7 @@ | |
return -E1000_ERR_PARAM; | |
} | |
- /* | |
- * Set up Op-code, Phy Address, and register offset in the MDI | |
+ /* Set up Op-code, Phy Address, and register offset in the MDI | |
* Control register. The MAC will take care of interfacing with the | |
* PHY to retrieve the desired data. | |
*/ | |
@@ -266,8 +224,7 @@ | |
ew32(MDIC, mdic); | |
- /* | |
- * Poll the ready bit to see if the MDI read completed | |
+ /* Poll the ready bit to see if the MDI read completed | |
* Increasing the time out as testing showed failures with | |
* the lower time out | |
*/ | |
@@ -285,9 +242,14 @@ | |
e_dbg("MDI Error\n"); | |
return -E1000_ERR_PHY; | |
} | |
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { | |
+ e_dbg("MDI Write offset error - requested %d, returned %d\n", | |
+ offset, | |
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); | |
+ return -E1000_ERR_PHY; | |
+ } | |
- /* | |
- * Allow some time after each MDIC transaction to avoid | |
+ /* Allow some time after each MDIC transaction to avoid | |
* reading duplicate data in the next MDIC transaction. | |
*/ | |
if (hw->mac.type == e1000_pch2lan) | |
@@ -377,34 +339,30 @@ | |
* semaphores before exiting. | |
**/ | |
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |
- bool locked) | |
+ bool locked) | |
{ | |
s32 ret_val = 0; | |
if (!locked) { | |
- if (!(hw->phy.ops.acquire)) | |
- goto out; | |
+ if (!hw->phy.ops.acquire) | |
+ return 0; | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
- if (offset > MAX_PHY_MULTI_PAGE_REG) { | |
+ if (offset > MAX_PHY_MULTI_PAGE_REG) | |
ret_val = e1000e_write_phy_reg_mdic(hw, | |
IGP01E1000_PHY_PAGE_SELECT, | |
(u16)offset); | |
- if (ret_val) | |
- goto release; | |
- } | |
- | |
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
- | |
-release: | |
+ if (!ret_val) | |
+ ret_val = e1000e_read_phy_reg_mdic(hw, | |
+ MAX_PHY_REG_ADDRESS & offset, | |
+ data); | |
if (!locked) | |
hw->phy.ops.release(hw); | |
-out: | |
+ | |
return ret_val; | |
} | |
@@ -448,35 +406,29 @@ | |
* at the offset. Release any acquired semaphores before exiting. | |
**/ | |
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |
- bool locked) | |
+ bool locked) | |
{ | |
s32 ret_val = 0; | |
if (!locked) { | |
- if (!(hw->phy.ops.acquire)) | |
- goto out; | |
+ if (!hw->phy.ops.acquire) | |
+ return 0; | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
- if (offset > MAX_PHY_MULTI_PAGE_REG) { | |
+ if (offset > MAX_PHY_MULTI_PAGE_REG) | |
ret_val = e1000e_write_phy_reg_mdic(hw, | |
IGP01E1000_PHY_PAGE_SELECT, | |
(u16)offset); | |
- if (ret_val) | |
- goto release; | |
- } | |
- | |
- ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
- | |
-release: | |
+ if (!ret_val) | |
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & | |
+ offset, data); | |
if (!locked) | |
hw->phy.ops.release(hw); | |
-out: | |
return ret_val; | |
} | |
@@ -520,18 +472,19 @@ | |
* Release any acquired semaphores before exiting. | |
**/ | |
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |
- bool locked) | |
+ bool locked) | |
{ | |
u32 kmrnctrlsta; | |
- s32 ret_val = 0; | |
if (!locked) { | |
- if (!(hw->phy.ops.acquire)) | |
- goto out; | |
+ s32 ret_val = 0; | |
+ | |
+ if (!hw->phy.ops.acquire) | |
+ return 0; | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | |
@@ -547,8 +500,7 @@ | |
if (!locked) | |
hw->phy.ops.release(hw); | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -593,18 +545,19 @@ | |
* before exiting. | |
**/ | |
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |
- bool locked) | |
+ bool locked) | |
{ | |
u32 kmrnctrlsta; | |
- s32 ret_val = 0; | |
if (!locked) { | |
- if (!(hw->phy.ops.acquire)) | |
- goto out; | |
+ s32 ret_val = 0; | |
+ | |
+ if (!hw->phy.ops.acquire) | |
+ return 0; | |
ret_val = hw->phy.ops.acquire(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | |
@@ -617,8 +570,7 @@ | |
if (!locked) | |
hw->phy.ops.release(hw); | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -650,6 +602,45 @@ | |
} | |
/** | |
+ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode | |
+ * @hw: pointer to the HW structure | |
+ * | |
+ * Sets up Master/slave mode | |
+ **/ | |
+static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) | |
+{ | |
+ s32 ret_val; | |
+ u16 phy_data; | |
+ | |
+ /* Resolve Master/Slave mode */ | |
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ /* load defaults for future use */ | |
+ hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ? | |
+ ((phy_data & CTL1000_AS_MASTER) ? | |
+ e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; | |
+ | |
+ switch (hw->phy.ms_type) { | |
+ case e1000_ms_force_master: | |
+ phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); | |
+ break; | |
+ case e1000_ms_force_slave: | |
+ phy_data |= CTL1000_ENABLE_MASTER; | |
+ phy_data &= ~(CTL1000_AS_MASTER); | |
+ break; | |
+ case e1000_ms_auto: | |
+ phy_data &= ~CTL1000_ENABLE_MASTER; | |
+ /* fall-through */ | |
+ default: | |
+ break; | |
+ } | |
+ | |
+ return e1e_wphy(hw, MII_CTRL1000, phy_data); | |
+} | |
+ | |
+/** | |
* e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link | |
* @hw: pointer to the HW structure | |
* | |
@@ -663,7 +654,7 @@ | |
/* Enable CRS on Tx. This must be set for half-duplex operation. */ | |
ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; | |
@@ -671,9 +662,35 @@ | |
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | |
ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); | |
+ if (ret_val) | |
+ return ret_val; | |
-out: | |
- return ret_val; | |
+ /* Set MDI/MDIX mode */ | |
+ ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); | |
+ if (ret_val) | |
+ return ret_val; | |
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; | |
+ /* Options: | |
+ * 0 - Auto (default) | |
+ * 1 - MDI mode | |
+ * 2 - MDI-X mode | |
+ */ | |
+ switch (hw->phy.mdix) { | |
+ case 1: | |
+ break; | |
+ case 2: | |
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; | |
+ break; | |
+ case 0: | |
+ default: | |
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; | |
+ break; | |
+ } | |
+ ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); | |
+ if (ret_val) | |
+ return ret_val; | |
+ | |
+ return e1000_set_master_slave_mode(hw); | |
} | |
/** | |
@@ -698,8 +715,7 @@ | |
if (phy->type != e1000_phy_bm) | |
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | |
- /* | |
- * Options: | |
+ /* Options: | |
* MDI/MDI-X = 0 (default) | |
* 0 - Auto for all speeds | |
* 1 - MDI mode | |
@@ -724,20 +740,35 @@ | |
break; | |
} | |
- /* | |
- * Options: | |
+ /* Options: | |
* disable_polarity_correction = 0 (default) | |
* Automatic Correction for Reversed Cable Polarity | |
* 0 - Disabled | |
* 1 - Enabled | |
*/ | |
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; | |
- if (phy->disable_polarity_correction == 1) | |
+ if (phy->disable_polarity_correction) | |
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; | |
/* Enable downshift on BM (disabled by default) */ | |
- if (phy->type == e1000_phy_bm) | |
+ if (phy->type == e1000_phy_bm) { | |
+ /* For 82574/82583, first disable then enable downshift */ | |
+ if (phy->id == BME1000_E_PHY_ID_R2) { | |
+ phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; | |
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, | |
+ phy_data); | |
+ if (ret_val) | |
+ return ret_val; | |
+ /* Commit the changes. */ | |
+ ret_val = phy->ops.commit(hw); | |
+ if (ret_val) { | |
+ e_dbg("Error committing the PHY changes\n"); | |
+ return ret_val; | |
+ } | |
+ } | |
+ | |
phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; | |
+ } | |
ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); | |
if (ret_val) | |
@@ -746,8 +777,7 @@ | |
if ((phy->type == e1000_phy_m88) && | |
(phy->revision < E1000_REVISION_4) && | |
(phy->id != BME1000_E_PHY_ID_R2)) { | |
- /* | |
- * Force TX_CLK in the Extended PHY Specific Control Register | |
+ /* Force TX_CLK in the Extended PHY Specific Control Register | |
* to 25MHz clock. | |
*/ | |
ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | |
@@ -756,8 +786,7 @@ | |
phy_data |= M88E1000_EPSCR_TX_CLK_25; | |
- if ((phy->revision == 2) && | |
- (phy->id == M88E1111_I_PHY_ID)) { | |
+ if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { | |
/* 82573L PHY - set the downshift counter to 5x. */ | |
phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; | |
phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; | |
@@ -786,10 +815,12 @@ | |
} | |
/* Commit the changes. */ | |
- ret_val = e1000e_commit_phy(hw); | |
- if (ret_val) { | |
- e_dbg("Error committing the PHY changes\n"); | |
- return ret_val; | |
+ if (phy->ops.commit) { | |
+ ret_val = phy->ops.commit(hw); | |
+ if (ret_val) { | |
+ e_dbg("Error committing the PHY changes\n"); | |
+ return ret_val; | |
+ } | |
} | |
if (phy->type == e1000_phy_82578) { | |
@@ -827,17 +858,18 @@ | |
return ret_val; | |
} | |
- /* | |
- * Wait 100ms for MAC to configure PHY from NVM settings, to avoid | |
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid | |
* timeout issues when LFS is enabled. | |
*/ | |
msleep(100); | |
/* disable lplu d0 during driver init */ | |
- ret_val = e1000_set_d0_lplu_state(hw, false); | |
- if (ret_val) { | |
- e_dbg("Error Disabling LPLU D0\n"); | |
- return ret_val; | |
+ if (hw->phy.ops.set_d0_lplu_state) { | |
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); | |
+ if (ret_val) { | |
+ e_dbg("Error Disabling LPLU D0\n"); | |
+ return ret_val; | |
+ } | |
} | |
/* Configure mdi-mdix settings */ | |
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); | |
@@ -864,8 +896,7 @@ | |
/* set auto-master slave resolution settings */ | |
if (hw->mac.autoneg) { | |
- /* | |
- * when autonegotiation advertisement is only 1000Mbps then we | |
+ /* when autonegotiation advertisement is only 1000Mbps then we | |
* should disable SmartSpeed and enable Auto MasterSlave | |
* resolution as hardware default. | |
*/ | |
@@ -883,41 +914,17 @@ | |
return ret_val; | |
/* Set auto Master/Slave resolution process */ | |
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); | |
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &data); | |
if (ret_val) | |
return ret_val; | |
- data &= ~CR_1000T_MS_ENABLE; | |
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); | |
+ data &= ~CTL1000_ENABLE_MASTER; | |
+ ret_val = e1e_wphy(hw, MII_CTRL1000, data); | |
if (ret_val) | |
return ret_val; | |
} | |
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); | |
- if (ret_val) | |
- return ret_val; | |
- | |
- /* load defaults for future use */ | |
- phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? | |
- ((data & CR_1000T_MS_VALUE) ? | |
- e1000_ms_force_master : | |
- e1000_ms_force_slave) : | |
- e1000_ms_auto; | |
- | |
- switch (phy->ms_type) { | |
- case e1000_ms_force_master: | |
- data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); | |
- break; | |
- case e1000_ms_force_slave: | |
- data |= CR_1000T_MS_ENABLE; | |
- data &= ~(CR_1000T_MS_VALUE); | |
- break; | |
- case e1000_ms_auto: | |
- data &= ~CR_1000T_MS_ENABLE; | |
- default: | |
- break; | |
- } | |
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); | |
+ ret_val = e1000_set_master_slave_mode(hw); | |
} | |
return ret_val; | |
@@ -942,60 +949,57 @@ | |
phy->autoneg_advertised &= phy->autoneg_mask; | |
/* Read the MII Auto-Neg Advertisement Register (Address 4). */ | |
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); | |
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg); | |
if (ret_val) | |
return ret_val; | |
if (phy->autoneg_mask & ADVERTISE_1000_FULL) { | |
/* Read the MII 1000Base-T Control Register (Address 9). */ | |
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); | |
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg); | |
if (ret_val) | |
return ret_val; | |
} | |
- /* | |
- * Need to parse both autoneg_advertised and fc and set up | |
+ /* Need to parse both autoneg_advertised and fc and set up | |
* the appropriate PHY registers. First we will parse for | |
* autoneg_advertised software override. Since we can advertise | |
* a plethora of combinations, we need to check each bit | |
* individually. | |
*/ | |
- /* | |
- * First we clear all the 10/100 mb speed bits in the Auto-Neg | |
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg | |
* Advertisement Register (Address 4) and the 1000 mb speed bits in | |
* the 1000Base-T Control Register (Address 9). | |
*/ | |
- mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | | |
- NWAY_AR_100TX_HD_CAPS | | |
- NWAY_AR_10T_FD_CAPS | | |
- NWAY_AR_10T_HD_CAPS); | |
- mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); | |
+ mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL | | |
+ ADVERTISE_100HALF | | |
+ ADVERTISE_10FULL | ADVERTISE_10HALF); | |
+ mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); | |
e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); | |
/* Do we want to advertise 10 Mb Half Duplex? */ | |
if (phy->autoneg_advertised & ADVERTISE_10_HALF) { | |
e_dbg("Advertise 10mb Half duplex\n"); | |
- mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; | |
+ mii_autoneg_adv_reg |= ADVERTISE_10HALF; | |
} | |
/* Do we want to advertise 10 Mb Full Duplex? */ | |
if (phy->autoneg_advertised & ADVERTISE_10_FULL) { | |
e_dbg("Advertise 10mb Full duplex\n"); | |
- mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; | |
+ mii_autoneg_adv_reg |= ADVERTISE_10FULL; | |
} | |
/* Do we want to advertise 100 Mb Half Duplex? */ | |
if (phy->autoneg_advertised & ADVERTISE_100_HALF) { | |
e_dbg("Advertise 100mb Half duplex\n"); | |
- mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; | |
+ mii_autoneg_adv_reg |= ADVERTISE_100HALF; | |
} | |
/* Do we want to advertise 100 Mb Full Duplex? */ | |
if (phy->autoneg_advertised & ADVERTISE_100_FULL) { | |
e_dbg("Advertise 100mb Full duplex\n"); | |
- mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; | |
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL; | |
} | |
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */ | |
@@ -1005,38 +1009,36 @@ | |
/* Do we want to advertise 1000 Mb Full Duplex? */ | |
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { | |
e_dbg("Advertise 1000mb Full duplex\n"); | |
- mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | |
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; | |
} | |
- /* | |
- * Check for a software override of the flow control settings, and | |
+ /* Check for a software override of the flow control settings, and | |
* setup the PHY advertisement registers accordingly. If | |
* auto-negotiation is enabled, then software will have to set the | |
* "PAUSE" bits to the correct value in the Auto-Negotiation | |
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- | |
+ * Advertisement Register (MII_ADVERTISE) and re-start auto- | |
* negotiation. | |
* | |
* The possible values of the "fc" parameter are: | |
* 0: Flow control is completely disabled | |
* 1: Rx flow control is enabled (we can receive pause frames | |
- * but not send pause frames). | |
+ * but not send pause frames). | |
* 2: Tx flow control is enabled (we can send pause frames | |
- * but we do not support receiving pause frames). | |
+ * but we do not support receiving pause frames). | |
* 3: Both Rx and Tx flow control (symmetric) are enabled. | |
* other: No software override. The flow control configuration | |
- * in the EEPROM is used. | |
+ * in the EEPROM is used. | |
*/ | |
switch (hw->fc.current_mode) { | |
case e1000_fc_none: | |
- /* | |
- * Flow control (Rx & Tx) is completely disabled by a | |
+ /* Flow control (Rx & Tx) is completely disabled by a | |
* software over-ride. | |
*/ | |
- mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | |
+ mii_autoneg_adv_reg &= | |
+ ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); | |
break; | |
case e1000_fc_rx_pause: | |
- /* | |
- * Rx Flow control is enabled, and Tx Flow control is | |
+ /* Rx Flow control is enabled, and Tx Flow control is | |
* disabled, by a software over-ride. | |
* | |
* Since there really isn't a way to advertise that we are | |
@@ -1045,37 +1047,36 @@ | |
* (in e1000e_config_fc_after_link_up) we will disable the | |
* hw's ability to send PAUSE frames. | |
*/ | |
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | |
+ mii_autoneg_adv_reg |= | |
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); | |
break; | |
case e1000_fc_tx_pause: | |
- /* | |
- * Tx Flow control is enabled, and Rx Flow control is | |
+ /* Tx Flow control is enabled, and Rx Flow control is | |
* disabled, by a software over-ride. | |
*/ | |
- mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; | |
- mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; | |
+ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; | |
+ mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; | |
break; | |
case e1000_fc_full: | |
- /* | |
- * Flow control (both Rx and Tx) is enabled by a software | |
+ /* Flow control (both Rx and Tx) is enabled by a software | |
* over-ride. | |
*/ | |
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | |
+ mii_autoneg_adv_reg |= | |
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); | |
break; | |
default: | |
e_dbg("Flow control param set incorrectly\n"); | |
- ret_val = -E1000_ERR_CONFIG; | |
- return ret_val; | |
+ return -E1000_ERR_CONFIG; | |
} | |
- ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); | |
+ ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg); | |
if (ret_val) | |
return ret_val; | |
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | |
if (phy->autoneg_mask & ADVERTISE_1000_FULL) | |
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | |
+ ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg); | |
return ret_val; | |
} | |
@@ -1095,17 +1096,15 @@ | |
s32 ret_val; | |
u16 phy_ctrl; | |
- /* | |
- * Perform some bounds checking on the autoneg advertisement | |
+ /* Perform some bounds checking on the autoneg advertisement | |
* parameter. | |
*/ | |
phy->autoneg_advertised &= phy->autoneg_mask; | |
- /* | |
- * If autoneg_advertised is zero, we assume it was not defaulted | |
+ /* If autoneg_advertised is zero, we assume it was not defaulted | |
* by the calling code so we set to advertise full capability. | |
*/ | |
- if (phy->autoneg_advertised == 0) | |
+ if (!phy->autoneg_advertised) | |
phy->autoneg_advertised = phy->autoneg_mask; | |
e_dbg("Reconfiguring auto-neg advertisement params\n"); | |
@@ -1116,33 +1115,30 @@ | |
} | |
e_dbg("Restarting Auto-Neg\n"); | |
- /* | |
- * Restart auto-negotiation by setting the Auto Neg Enable bit and | |
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and | |
* the Auto Neg Restart bit in the PHY control register. | |
*/ | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); | |
if (ret_val) | |
return ret_val; | |
- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); | |
+ phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Does the user want to wait for Auto-Neg to complete here, or | |
+ /* Does the user want to wait for Auto-Neg to complete here, or | |
* check at a later time (for example, callback routine). | |
*/ | |
if (phy->autoneg_wait_to_complete) { | |
ret_val = e1000_wait_autoneg(hw); | |
if (ret_val) { | |
- e_dbg("Error while waiting for " | |
- "autoneg to complete\n"); | |
+ e_dbg("Error while waiting for autoneg to complete\n"); | |
return ret_val; | |
} | |
} | |
- hw->mac.get_link_status = 1; | |
+ hw->mac.get_link_status = true; | |
return ret_val; | |
} | |
@@ -1162,40 +1158,35 @@ | |
bool link; | |
if (hw->mac.autoneg) { | |
- /* | |
- * Setup autoneg and flow control advertisement and perform | |
+ /* Setup autoneg and flow control advertisement and perform | |
* autonegotiation. | |
*/ | |
ret_val = e1000_copper_link_autoneg(hw); | |
if (ret_val) | |
return ret_val; | |
} else { | |
- /* | |
- * PHY will be set to 10H, 10F, 100H or 100F | |
+ /* PHY will be set to 10H, 10F, 100H or 100F | |
* depending on user settings. | |
*/ | |
e_dbg("Forcing Speed and Duplex\n"); | |
- ret_val = e1000_phy_force_speed_duplex(hw); | |
+ ret_val = hw->phy.ops.force_speed_duplex(hw); | |
if (ret_val) { | |
e_dbg("Error Forcing Speed and Duplex\n"); | |
return ret_val; | |
} | |
} | |
- /* | |
- * Check link status. Wait up to 100 microseconds for link to become | |
+ /* Check link status. Wait up to 100 microseconds for link to become | |
* valid. | |
*/ | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- COPPER_LINK_UP_LIMIT, | |
- 10, | |
- &link); | |
+ ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, | |
+ &link); | |
if (ret_val) | |
return ret_val; | |
if (link) { | |
e_dbg("Valid link established!!!\n"); | |
- e1000e_config_collision_dist(hw); | |
+ hw->mac.ops.config_collision_dist(hw); | |
ret_val = e1000e_config_fc_after_link_up(hw); | |
} else { | |
e_dbg("Unable to establish link!!!\n"); | |
@@ -1219,18 +1210,17 @@ | |
u16 phy_data; | |
bool link; | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); | |
if (ret_val) | |
return ret_val; | |
e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data); | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Clear Auto-Crossover to force MDI manually. IGP requires MDI | |
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI | |
* forced whenever speed and duplex are forced. | |
*/ | |
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); | |
@@ -1251,10 +1241,8 @@ | |
if (phy->autoneg_wait_to_complete) { | |
e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- PHY_FORCE_LIMIT, | |
- 100000, | |
- &link); | |
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
+ 100000, &link); | |
if (ret_val) | |
return ret_val; | |
@@ -1262,12 +1250,8 @@ | |
e_dbg("Link taking longer than expected.\n"); | |
/* Try once more */ | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- PHY_FORCE_LIMIT, | |
- 100000, | |
- &link); | |
- if (ret_val) | |
- return ret_val; | |
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
+ 100000, &link); | |
} | |
return ret_val; | |
@@ -1290,8 +1274,7 @@ | |
u16 phy_data; | |
bool link; | |
- /* | |
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | |
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | |
* forced whenever speed and duplex are forced. | |
*/ | |
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | |
@@ -1305,26 +1288,28 @@ | |
e_dbg("M88E1000 PSCR: %X\n", phy_data); | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); | |
if (ret_val) | |
return ret_val; | |
e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data); | |
if (ret_val) | |
return ret_val; | |
/* Reset the phy to commit changes. */ | |
- ret_val = e1000e_commit_phy(hw); | |
- if (ret_val) | |
- return ret_val; | |
+ if (hw->phy.ops.commit) { | |
+ ret_val = hw->phy.ops.commit(hw); | |
+ if (ret_val) | |
+ return ret_val; | |
+ } | |
if (phy->autoneg_wait_to_complete) { | |
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); | |
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
- 100000, &link); | |
+ 100000, &link); | |
if (ret_val) | |
return ret_val; | |
@@ -1332,8 +1317,7 @@ | |
if (hw->phy.type != e1000_phy_m88) { | |
e_dbg("Link taking longer than expected.\n"); | |
} else { | |
- /* | |
- * We didn't get link. | |
+ /* We didn't get link. | |
* Reset the DSP and cross our fingers. | |
*/ | |
ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, | |
@@ -1348,7 +1332,7 @@ | |
/* Try once more */ | |
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
- 100000, &link); | |
+ 100000, &link); | |
if (ret_val) | |
return ret_val; | |
} | |
@@ -1360,8 +1344,7 @@ | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Resetting the phy means we need to re-force TX_CLK in the | |
+ /* Resetting the phy means we need to re-force TX_CLK in the | |
* Extended PHY Specific Control Register to 25MHz clock from | |
* the reset value of 2.5MHz. | |
*/ | |
@@ -1370,8 +1353,7 @@ | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * In addition, we must re-enable CRS on Tx for both half and full | |
+ /* In addition, we must re-enable CRS on Tx for both half and full | |
* duplex. | |
*/ | |
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | |
@@ -1399,27 +1381,27 @@ | |
u16 data; | |
bool link; | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &data); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1000e_phy_force_speed_duplex_setup(hw, &data); | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, data); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
/* Disable MDI-X support for 10/100 */ | |
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
data &= ~IFE_PMC_AUTO_MDIX; | |
data &= ~IFE_PMC_FORCE_MDIX; | |
ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e_dbg("IFE PMC: %X\n", data); | |
@@ -1428,39 +1410,34 @@ | |
if (phy->autoneg_wait_to_complete) { | |
e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- PHY_FORCE_LIMIT, | |
- 100000, | |
- &link); | |
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
+ 100000, &link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (!link) | |
e_dbg("Link taking longer than expected.\n"); | |
/* Try once more */ | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- PHY_FORCE_LIMIT, | |
- 100000, | |
- &link); | |
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
+ 100000, &link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
* e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex | |
* @hw: pointer to the HW structure | |
- * @phy_ctrl: pointer to current value of PHY_CONTROL | |
+ * @phy_ctrl: pointer to current value of MII_BMCR | |
* | |
* Forces speed and duplex on the PHY by doing the following: disable flow | |
* control, force speed/duplex on the MAC, disable auto speed detection, | |
* disable auto-negotiation, configure duplex, configure speed, configure | |
* the collision distance, write configuration to CTRL register. The | |
- * caller must write to the PHY_CONTROL register for these settings to | |
+ * caller must write to the MII_BMCR register for these settings to | |
* take affect. | |
**/ | |
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |
@@ -1480,33 +1457,32 @@ | |
ctrl &= ~E1000_CTRL_ASDE; | |
/* Disable autoneg on the phy */ | |
- *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; | |
+ *phy_ctrl &= ~BMCR_ANENABLE; | |
/* Forcing Full or Half Duplex? */ | |
if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { | |
ctrl &= ~E1000_CTRL_FD; | |
- *phy_ctrl &= ~MII_CR_FULL_DUPLEX; | |
+ *phy_ctrl &= ~BMCR_FULLDPLX; | |
e_dbg("Half Duplex\n"); | |
} else { | |
ctrl |= E1000_CTRL_FD; | |
- *phy_ctrl |= MII_CR_FULL_DUPLEX; | |
+ *phy_ctrl |= BMCR_FULLDPLX; | |
e_dbg("Full Duplex\n"); | |
} | |
/* Forcing 10mb or 100mb? */ | |
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { | |
ctrl |= E1000_CTRL_SPD_100; | |
- *phy_ctrl |= MII_CR_SPEED_100; | |
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); | |
+ *phy_ctrl |= BMCR_SPEED100; | |
+ *phy_ctrl &= ~BMCR_SPEED1000; | |
e_dbg("Forcing 100mb\n"); | |
} else { | |
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | |
- *phy_ctrl |= MII_CR_SPEED_10; | |
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); | |
+ *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); | |
e_dbg("Forcing 10mb\n"); | |
} | |
- e1000e_config_collision_dist(hw); | |
+ hw->mac.ops.config_collision_dist(hw); | |
ew32(CTRL, ctrl); | |
} | |
@@ -1540,8 +1516,7 @@ | |
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | |
* during Dx states where the power conservation is most | |
* important. During driver activity we should enable | |
* SmartSpeed, so performance is maintained. | |
@@ -1608,13 +1583,13 @@ | |
case e1000_phy_gg82563: | |
case e1000_phy_bm: | |
case e1000_phy_82578: | |
- offset = M88E1000_PHY_SPEC_STATUS; | |
- mask = M88E1000_PSSR_DOWNSHIFT; | |
+ offset = M88E1000_PHY_SPEC_STATUS; | |
+ mask = M88E1000_PSSR_DOWNSHIFT; | |
break; | |
case e1000_phy_igp_2: | |
case e1000_phy_igp_3: | |
- offset = IGP01E1000_PHY_LINK_HEALTH; | |
- mask = IGP01E1000_PLHR_SS_DOWNGRADE; | |
+ offset = IGP01E1000_PHY_LINK_HEALTH; | |
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE; | |
break; | |
default: | |
/* speed downshift not supported */ | |
@@ -1625,7 +1600,7 @@ | |
ret_val = e1e_rphy(hw, offset, &phy_data); | |
if (!ret_val) | |
- phy->speed_downgraded = (phy_data & mask); | |
+ phy->speed_downgraded = !!(phy_data & mask); | |
return ret_val; | |
} | |
@@ -1647,9 +1622,9 @@ | |
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); | |
if (!ret_val) | |
- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) | |
- ? e1000_rev_polarity_reversed | |
- : e1000_rev_polarity_normal; | |
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) | |
+ ? e1000_rev_polarity_reversed | |
+ : e1000_rev_polarity_normal); | |
return ret_val; | |
} | |
@@ -1669,8 +1644,7 @@ | |
s32 ret_val; | |
u16 data, offset, mask; | |
- /* | |
- * Polarity is determined based on the speed of | |
+ /* Polarity is determined based on the speed of | |
* our connection. | |
*/ | |
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); | |
@@ -1679,23 +1653,22 @@ | |
if ((data & IGP01E1000_PSSR_SPEED_MASK) == | |
IGP01E1000_PSSR_SPEED_1000MBPS) { | |
- offset = IGP01E1000_PHY_PCS_INIT_REG; | |
- mask = IGP01E1000_PHY_POLARITY_MASK; | |
+ offset = IGP01E1000_PHY_PCS_INIT_REG; | |
+ mask = IGP01E1000_PHY_POLARITY_MASK; | |
} else { | |
- /* | |
- * This really only applies to 10Mbps since | |
+ /* This really only applies to 10Mbps since | |
* there is no polarity for 100Mbps (always 0). | |
*/ | |
- offset = IGP01E1000_PHY_PORT_STATUS; | |
- mask = IGP01E1000_PSSR_POLARITY_REVERSED; | |
+ offset = IGP01E1000_PHY_PORT_STATUS; | |
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED; | |
} | |
ret_val = e1e_rphy(hw, offset, &data); | |
if (!ret_val) | |
- phy->cable_polarity = (data & mask) | |
- ? e1000_rev_polarity_reversed | |
- : e1000_rev_polarity_normal; | |
+ phy->cable_polarity = ((data & mask) | |
+ ? e1000_rev_polarity_reversed | |
+ : e1000_rev_polarity_normal); | |
return ret_val; | |
} | |
@@ -1712,8 +1685,7 @@ | |
s32 ret_val; | |
u16 phy_data, offset, mask; | |
- /* | |
- * Polarity is determined based on the reversal feature being enabled. | |
+ /* Polarity is determined based on the reversal feature being enabled. | |
*/ | |
if (phy->polarity_correction) { | |
offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | |
@@ -1726,9 +1698,9 @@ | |
ret_val = e1e_rphy(hw, offset, &phy_data); | |
if (!ret_val) | |
- phy->cable_polarity = (phy_data & mask) | |
- ? e1000_rev_polarity_reversed | |
- : e1000_rev_polarity_normal; | |
+ phy->cable_polarity = ((phy_data & mask) | |
+ ? e1000_rev_polarity_reversed | |
+ : e1000_rev_polarity_normal); | |
return ret_val; | |
} | |
@@ -1747,19 +1719,18 @@ | |
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ | |
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { | |
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | |
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | |
if (ret_val) | |
break; | |
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | |
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | |
if (ret_val) | |
break; | |
- if (phy_status & MII_SR_AUTONEG_COMPLETE) | |
+ if (phy_status & BMSR_ANEGCOMPLETE) | |
break; | |
msleep(100); | |
} | |
- /* | |
- * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation | |
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation | |
* has completed. | |
*/ | |
return ret_val; | |
@@ -1775,32 +1746,34 @@ | |
* Polls the PHY status register for link, 'iterations' number of times. | |
**/ | |
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | |
- u32 usec_interval, bool *success) | |
+ u32 usec_interval, bool *success) | |
{ | |
s32 ret_val = 0; | |
u16 i, phy_status; | |
for (i = 0; i < iterations; i++) { | |
- /* | |
- * Some PHYs require the PHY_STATUS register to be read | |
+ /* Some PHYs require the MII_BMSR register to be read | |
* twice due to the link bit being sticky. No harm doing | |
* it across the board. | |
*/ | |
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | |
- if (ret_val) | |
- /* | |
- * If the first read fails, another entity may have | |
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | |
+ if (ret_val) { | |
+ /* If the first read fails, another entity may have | |
* ownership of the resources, wait and try again to | |
* see if they have relinquished the resources yet. | |
*/ | |
- udelay(usec_interval); | |
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | |
+ if (usec_interval >= 1000) | |
+ msleep(usec_interval / 1000); | |
+ else | |
+ udelay(usec_interval); | |
+ } | |
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | |
if (ret_val) | |
break; | |
- if (phy_status & MII_SR_LINK_STATUS) | |
+ if (phy_status & BMSR_LSTATUS) | |
break; | |
if (usec_interval >= 1000) | |
- mdelay(usec_interval/1000); | |
+ msleep(usec_interval / 1000); | |
else | |
udelay(usec_interval); | |
} | |
@@ -1833,22 +1806,20 @@ | |
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | |
- M88E1000_PSSR_CABLE_LENGTH_SHIFT; | |
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { | |
- ret_val = -E1000_ERR_PHY; | |
- goto out; | |
- } | |
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | |
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT); | |
+ | |
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) | |
+ return -E1000_ERR_PHY; | |
phy->min_cable_length = e1000_m88_cable_length_table[index]; | |
phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; | |
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -1870,10 +1841,10 @@ | |
u16 cur_agc_index, max_agc_index = 0; | |
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; | |
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { | |
- IGP02E1000_PHY_AGC_A, | |
- IGP02E1000_PHY_AGC_B, | |
- IGP02E1000_PHY_AGC_C, | |
- IGP02E1000_PHY_AGC_D | |
+ IGP02E1000_PHY_AGC_A, | |
+ IGP02E1000_PHY_AGC_B, | |
+ IGP02E1000_PHY_AGC_C, | |
+ IGP02E1000_PHY_AGC_D | |
}; | |
/* Read the AGC registers for all channels */ | |
@@ -1882,14 +1853,13 @@ | |
if (ret_val) | |
return ret_val; | |
- /* | |
- * Getting bits 15:9, which represent the combination of | |
+ /* Getting bits 15:9, which represent the combination of | |
* coarse and fine gain values. The result is a number | |
* that can be put into the lookup table to obtain the | |
* approximate cable length. | |
*/ | |
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & | |
- IGP02E1000_AGC_LENGTH_MASK; | |
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & | |
+ IGP02E1000_AGC_LENGTH_MASK); | |
/* Array index bound check. */ | |
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || | |
@@ -1912,13 +1882,13 @@ | |
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); | |
/* Calculate cable length with the error range of +/- 10 meters. */ | |
- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? | |
- (agc_value - IGP02E1000_AGC_RANGE) : 0; | |
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? | |
+ (agc_value - IGP02E1000_AGC_RANGE) : 0); | |
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; | |
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -1934,7 +1904,7 @@ | |
s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |
{ | |
struct e1000_phy_info *phy = &hw->phy; | |
- s32 ret_val; | |
+ s32 ret_val; | |
u16 phy_data; | |
bool link; | |
@@ -1956,8 +1926,8 @@ | |
if (ret_val) | |
return ret_val; | |
- phy->polarity_correction = (phy_data & | |
- M88E1000_PSCR_POLARITY_REVERSAL); | |
+ phy->polarity_correction = !!(phy_data & | |
+ M88E1000_PSCR_POLARITY_REVERSAL); | |
ret_val = e1000_check_polarity_m88(hw); | |
if (ret_val) | |
@@ -1967,24 +1937,22 @@ | |
if (ret_val) | |
return ret_val; | |
- phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); | |
+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); | |
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { | |
- ret_val = e1000_get_cable_length(hw); | |
+ ret_val = hw->phy.ops.get_cable_length(hw); | |
if (ret_val) | |
return ret_val; | |
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); | |
+ ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); | |
if (ret_val) | |
return ret_val; | |
- phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) | |
- ? e1000_1000t_rx_status_ok | |
- : e1000_1000t_rx_status_not_ok; | |
+ phy->local_rx = (phy_data & LPA_1000LOCALRXOK) | |
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; | |
- phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) | |
- ? e1000_1000t_rx_status_ok | |
- : e1000_1000t_rx_status_not_ok; | |
+ phy->remote_rx = (phy_data & LPA_1000REMRXOK) | |
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; | |
} else { | |
/* Set values to "undefined" */ | |
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | |
@@ -2030,25 +1998,23 @@ | |
if (ret_val) | |
return ret_val; | |
- phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); | |
+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); | |
if ((data & IGP01E1000_PSSR_SPEED_MASK) == | |
IGP01E1000_PSSR_SPEED_1000MBPS) { | |
- ret_val = e1000_get_cable_length(hw); | |
+ ret_val = phy->ops.get_cable_length(hw); | |
if (ret_val) | |
return ret_val; | |
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); | |
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data); | |
if (ret_val) | |
return ret_val; | |
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) | |
- ? e1000_1000t_rx_status_ok | |
- : e1000_1000t_rx_status_not_ok; | |
+ phy->local_rx = (data & LPA_1000LOCALRXOK) | |
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; | |
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) | |
- ? e1000_1000t_rx_status_ok | |
- : e1000_1000t_rx_status_not_ok; | |
+ phy->remote_rx = (data & LPA_1000REMRXOK) | |
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; | |
} else { | |
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | |
phy->local_rx = e1000_1000t_rx_status_undefined; | |
@@ -2073,44 +2039,41 @@ | |
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (!link) { | |
e_dbg("Phy info is only valid if link is up\n"); | |
- ret_val = -E1000_ERR_CONFIG; | |
- goto out; | |
+ return -E1000_ERR_CONFIG; | |
} | |
ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); | |
if (ret_val) | |
- goto out; | |
- phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) | |
- ? false : true; | |
+ return ret_val; | |
+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); | |
if (phy->polarity_correction) { | |
ret_val = e1000_check_polarity_ife(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
} else { | |
/* Polarity is forced */ | |
- phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) | |
- ? e1000_rev_polarity_reversed | |
- : e1000_rev_polarity_normal; | |
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) | |
+ ? e1000_rev_polarity_reversed | |
+ : e1000_rev_polarity_normal); | |
} | |
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; | |
+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); | |
/* The following parameters are undefined for 10/100 operation. */ | |
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | |
phy->local_rx = e1000_1000t_rx_status_undefined; | |
phy->remote_rx = e1000_1000t_rx_status_undefined; | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -2125,12 +2088,12 @@ | |
s32 ret_val; | |
u16 phy_ctrl; | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); | |
if (ret_val) | |
return ret_val; | |
- phy_ctrl |= MII_CR_RESET; | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); | |
+ phy_ctrl |= BMCR_RESET; | |
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); | |
if (ret_val) | |
return ret_val; | |
@@ -2154,9 +2117,11 @@ | |
s32 ret_val; | |
u32 ctrl; | |
- ret_val = e1000_check_reset_block(hw); | |
- if (ret_val) | |
- return 0; | |
+ if (phy->ops.check_reset_block) { | |
+ ret_val = phy->ops.check_reset_block(hw); | |
+ if (ret_val) | |
+ return 0; | |
+ } | |
ret_val = phy->ops.acquire(hw); | |
if (ret_val) | |
@@ -2171,23 +2136,24 @@ | |
ew32(CTRL, ctrl); | |
e1e_flush(); | |
- udelay(150); | |
+ usleep_range(150, 300); | |
phy->ops.release(hw); | |
- return e1000_get_phy_cfg_done(hw); | |
+ return phy->ops.get_cfg_done(hw); | |
} | |
/** | |
- * e1000e_get_cfg_done - Generic configuration done | |
+ * e1000e_get_cfg_done_generic - Generic configuration done | |
* @hw: pointer to the HW structure | |
* | |
* Generic function to wait 10 milli-seconds for configuration to complete | |
* and return success. | |
**/ | |
-s32 e1000e_get_cfg_done(struct e1000_hw *hw) | |
+s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw) | |
{ | |
mdelay(10); | |
+ | |
return 0; | |
} | |
@@ -2254,15 +2220,13 @@ | |
e1e_wphy(hw, 0x1796, 0x0008); | |
/* Change cg_icount + enable integbp for channels BCD */ | |
e1e_wphy(hw, 0x1798, 0xD008); | |
- /* | |
- * Change cg_icount + enable integbp + change prop_factor_master | |
+ /* Change cg_icount + enable integbp + change prop_factor_master | |
* to 8 for channel A | |
*/ | |
e1e_wphy(hw, 0x1898, 0xD918); | |
/* Disable AHT in Slave mode on channel A */ | |
e1e_wphy(hw, 0x187A, 0x0800); | |
- /* | |
- * Enable LPLU and disable AN to 1000 in non-D0a states, | |
+ /* Enable LPLU and disable AN to 1000 in non-D0a states, | |
* Enable SPD+B2B | |
*/ | |
e1e_wphy(hw, 0x0019, 0x008D); | |
@@ -2276,38 +2240,6 @@ | |
return 0; | |
} | |
-/* Internal function pointers */ | |
- | |
-/** | |
- * e1000_get_phy_cfg_done - Generic PHY configuration done | |
- * @hw: pointer to the HW structure | |
- * | |
- * Return success if silicon family did not implement a family specific | |
- * get_cfg_done function. | |
- **/ | |
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) | |
-{ | |
- if (hw->phy.ops.get_cfg_done) | |
- return hw->phy.ops.get_cfg_done(hw); | |
- | |
- return 0; | |
-} | |
- | |
-/** | |
- * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex | |
- * @hw: pointer to the HW structure | |
- * | |
- * When the silicon family has not implemented a forced speed/duplex | |
- * function for the PHY, simply return 0. | |
- **/ | |
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |
-{ | |
- if (hw->phy.ops.force_speed_duplex) | |
- return hw->phy.ops.force_speed_duplex(hw); | |
- | |
- return 0; | |
-} | |
- | |
/** | |
* e1000e_get_phy_type_from_id - Get PHY type from id | |
* @phy_id: phy_id read from the phy | |
@@ -2325,7 +2257,7 @@ | |
case M88E1011_I_PHY_ID: | |
phy_type = e1000_phy_m88; | |
break; | |
- case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ | |
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ | |
phy_type = e1000_phy_igp_2; | |
break; | |
case GG82563_E_PHY_ID: | |
@@ -2352,6 +2284,9 @@ | |
case I82579_E_PHY_ID: | |
phy_type = e1000_phy_82579; | |
break; | |
+ case I217_E_PHY_ID: | |
+ phy_type = e1000_phy_i217; | |
+ break; | |
default: | |
phy_type = e1000_phy_unknown; | |
break; | |
@@ -2369,7 +2304,6 @@ | |
**/ | |
s32 e1000e_determine_phy_address(struct e1000_hw *hw) | |
{ | |
- s32 ret_val = -E1000_ERR_PHY_TYPE; | |
u32 phy_addr = 0; | |
u32 i; | |
enum e1000_phy_type phy_type = e1000_phy_unknown; | |
@@ -2384,21 +2318,18 @@ | |
e1000e_get_phy_id(hw); | |
phy_type = e1000e_get_phy_type_from_id(hw->phy.id); | |
- /* | |
- * If phy_type is valid, break - we found our | |
+ /* If phy_type is valid, break - we found our | |
* PHY address | |
*/ | |
- if (phy_type != e1000_phy_unknown) { | |
- ret_val = 0; | |
- goto out; | |
- } | |
+ if (phy_type != e1000_phy_unknown) | |
+ return 0; | |
+ | |
usleep_range(1000, 2000); | |
i++; | |
} while (i < 10); | |
} | |
-out: | |
- return ret_val; | |
+ return -E1000_ERR_PHY_TYPE; | |
} | |
/** | |
@@ -2439,7 +2370,7 @@ | |
if (page == BM_WUC_PAGE) { | |
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | |
false, false); | |
- goto out; | |
+ goto release; | |
} | |
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | |
@@ -2447,8 +2378,7 @@ | |
if (offset > MAX_PHY_MULTI_PAGE_REG) { | |
u32 page_shift, page_select; | |
- /* | |
- * Page select is register 31 for phy address 1 and 22 for | |
+ /* Page select is register 31 for phy address 1 and 22 for | |
* phy address 2 and 3. Page select is shifted only for | |
* phy address 1. | |
*/ | |
@@ -2462,15 +2392,15 @@ | |
/* Page is shifted left, PHY expects (page x 32) */ | |
ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | |
- (page << page_shift)); | |
+ (page << page_shift)); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
} | |
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
+ data); | |
-out: | |
+release: | |
hw->phy.ops.release(hw); | |
return ret_val; | |
} | |
@@ -2498,7 +2428,7 @@ | |
if (page == BM_WUC_PAGE) { | |
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | |
true, false); | |
- goto out; | |
+ goto release; | |
} | |
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | |
@@ -2506,8 +2436,7 @@ | |
if (offset > MAX_PHY_MULTI_PAGE_REG) { | |
u32 page_shift, page_select; | |
- /* | |
- * Page select is register 31 for phy address 1 and 22 for | |
+ /* Page select is register 31 for phy address 1 and 22 for | |
* phy address 2 and 3. Page select is shifted only for | |
* phy address 1. | |
*/ | |
@@ -2521,14 +2450,14 @@ | |
/* Page is shifted left, PHY expects (page x 32) */ | |
ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | |
- (page << page_shift)); | |
+ (page << page_shift)); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
} | |
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | |
- data); | |
-out: | |
+ data); | |
+release: | |
hw->phy.ops.release(hw); | |
return ret_val; | |
} | |
@@ -2556,24 +2485,23 @@ | |
if (page == BM_WUC_PAGE) { | |
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | |
true, false); | |
- goto out; | |
+ goto release; | |
} | |
hw->phy.addr = 1; | |
if (offset > MAX_PHY_MULTI_PAGE_REG) { | |
- | |
/* Page is shifted left, PHY expects (page x 32) */ | |
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, | |
page); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
} | |
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | |
data); | |
-out: | |
+release: | |
hw->phy.ops.release(hw); | |
return ret_val; | |
} | |
@@ -2600,7 +2528,7 @@ | |
if (page == BM_WUC_PAGE) { | |
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | |
false, false); | |
- goto out; | |
+ goto release; | |
} | |
hw->phy.addr = 1; | |
@@ -2611,13 +2539,13 @@ | |
page); | |
if (ret_val) | |
- goto out; | |
+ goto release; | |
} | |
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | |
data); | |
-out: | |
+release: | |
hw->phy.ops.release(hw); | |
return ret_val; | |
} | |
@@ -2642,18 +2570,17 @@ | |
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); | |
if (ret_val) { | |
e_dbg("Could not set Port Control page\n"); | |
- goto out; | |
+ return ret_val; | |
} | |
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | |
if (ret_val) { | |
e_dbg("Could not read PHY register %d.%d\n", | |
BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); | |
- goto out; | |
+ return ret_val; | |
} | |
- /* | |
- * Enable both PHY wakeup mode and Wakeup register page writes. | |
+ /* Enable both PHY wakeup mode and Wakeup register page writes. | |
* Prevent a power state change by disabling ME and Host PHY wakeup. | |
*/ | |
temp = *phy_reg; | |
@@ -2664,15 +2591,13 @@ | |
if (ret_val) { | |
e_dbg("Could not write PHY register %d.%d\n", | |
BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); | |
- goto out; | |
+ return ret_val; | |
} | |
- /* Select Host Wakeup Registers page */ | |
- ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); | |
- | |
- /* caller now able to write registers on the Wakeup registers page */ | |
-out: | |
- return ret_val; | |
+ /* Select Host Wakeup Registers page - caller now able to write | |
+ * registers on the Wakeup registers page | |
+ */ | |
+ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); | |
} | |
/** | |
@@ -2688,13 +2613,13 @@ | |
**/ | |
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) | |
{ | |
- s32 ret_val = 0; | |
+ s32 ret_val; | |
/* Select Port Control Registers page */ | |
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); | |
if (ret_val) { | |
e_dbg("Could not set Port Control page\n"); | |
- goto out; | |
+ return ret_val; | |
} | |
/* Restore 769.17 to its original value */ | |
@@ -2702,7 +2627,7 @@ | |
if (ret_val) | |
e_dbg("Could not restore PHY register %d.%d\n", | |
BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); | |
-out: | |
+ | |
return ret_val; | |
} | |
@@ -2750,7 +2675,7 @@ | |
ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); | |
if (ret_val) { | |
e_dbg("Could not enable PHY wakeup reg access\n"); | |
- goto out; | |
+ return ret_val; | |
} | |
} | |
@@ -2760,13 +2685,13 @@ | |
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); | |
if (ret_val) { | |
e_dbg("Could not write address opcode to page %d\n", page); | |
- goto out; | |
+ return ret_val; | |
} | |
if (read) { | |
/* Read the Wakeup register page value using opcode 0x12 */ | |
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | |
- data); | |
+ data); | |
} else { | |
/* Write the Wakeup register page value using opcode 0x12 */ | |
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | |
@@ -2775,13 +2700,12 @@ | |
if (ret_val) { | |
e_dbg("Could not access PHY reg %d.%d\n", page, reg); | |
- goto out; | |
+ return ret_val; | |
} | |
if (!page_set) | |
ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); | |
-out: | |
return ret_val; | |
} | |
@@ -2798,9 +2722,9 @@ | |
u16 mii_reg = 0; | |
/* The PHY will retain its settings across a power down/up cycle */ | |
- e1e_rphy(hw, PHY_CONTROL, &mii_reg); | |
- mii_reg &= ~MII_CR_POWER_DOWN; | |
- e1e_wphy(hw, PHY_CONTROL, mii_reg); | |
+ e1e_rphy(hw, MII_BMCR, &mii_reg); | |
+ mii_reg &= ~BMCR_PDOWN; | |
+ e1e_wphy(hw, MII_BMCR, mii_reg); | |
} | |
/** | |
@@ -2816,50 +2740,13 @@ | |
u16 mii_reg = 0; | |
/* The PHY will retain its settings across a power down/up cycle */ | |
- e1e_rphy(hw, PHY_CONTROL, &mii_reg); | |
- mii_reg |= MII_CR_POWER_DOWN; | |
- e1e_wphy(hw, PHY_CONTROL, mii_reg); | |
+ e1e_rphy(hw, MII_BMCR, &mii_reg); | |
+ mii_reg |= BMCR_PDOWN; | |
+ e1e_wphy(hw, MII_BMCR, mii_reg); | |
usleep_range(1000, 2000); | |
} | |
/** | |
- * e1000e_commit_phy - Soft PHY reset | |
- * @hw: pointer to the HW structure | |
- * | |
- * Performs a soft PHY reset on those that apply. This is a function pointer | |
- * entry point called by drivers. | |
- **/ | |
-s32 e1000e_commit_phy(struct e1000_hw *hw) | |
-{ | |
- if (hw->phy.ops.commit) | |
- return hw->phy.ops.commit(hw); | |
- | |
- return 0; | |
-} | |
- | |
-/** | |
- * e1000_set_d0_lplu_state - Sets low power link up state for D0 | |
- * @hw: pointer to the HW structure | |
- * @active: boolean used to enable/disable lplu | |
- * | |
- * Success returns 0, Failure returns 1 | |
- * | |
- * The low power link up (lplu) state is set to the power management level D0 | |
- * and SmartSpeed is disabled when active is true, else clear lplu for D0 | |
- * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU | |
- * is used during Dx states where the power conservation is most important. | |
- * During driver activity, SmartSpeed should be enabled so performance is | |
- * maintained. This is a function pointer entry point called by drivers. | |
- **/ | |
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | |
-{ | |
- if (hw->phy.ops.set_d0_lplu_state) | |
- return hw->phy.ops.set_d0_lplu_state(hw, active); | |
- | |
- return 0; | |
-} | |
- | |
-/** | |
* __e1000_read_phy_reg_hv - Read HV PHY register | |
* @hw: pointer to the HW structure | |
* @offset: register offset to be read | |
@@ -2893,7 +2780,7 @@ | |
if (page > 0 && page < HV_INTC_FC_PAGE_START) { | |
ret_val = e1000_access_phy_debug_regs_hv(hw, offset, | |
- data, true); | |
+ data, true); | |
goto out; | |
} | |
@@ -2916,8 +2803,7 @@ | |
e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, | |
page << IGP_PAGE_SHIFT, reg); | |
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | |
- data); | |
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); | |
out: | |
if (!locked) | |
hw->phy.ops.release(hw); | |
@@ -3001,7 +2887,7 @@ | |
if (page > 0 && page < HV_INTC_FC_PAGE_START) { | |
ret_val = e1000_access_phy_debug_regs_hv(hw, offset, | |
- &data, false); | |
+ &data, false); | |
goto out; | |
} | |
@@ -3009,14 +2895,13 @@ | |
if (page == HV_INTC_FC_PAGE_START) | |
page = 0; | |
- /* | |
- * Workaround MDIO accesses being disabled after entering IEEE | |
+ /* Workaround MDIO accesses being disabled after entering IEEE | |
* Power Down (when bit 11 of the PHY Control register is set) | |
*/ | |
if ((hw->phy.type == e1000_phy_82578) && | |
(hw->phy.revision >= 1) && | |
(hw->phy.addr == 2) && | |
- ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { | |
+ !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { | |
u16 data2 = 0x7EFF; | |
ret_val = e1000_access_phy_debug_regs_hv(hw, | |
(1 << 6) | 0x3, | |
@@ -3041,7 +2926,7 @@ | |
page << IGP_PAGE_SHIFT, reg); | |
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | |
- data); | |
+ data); | |
out: | |
if (!locked) | |
@@ -3119,15 +3004,15 @@ | |
* These accesses done with PHY address 2 and without using pages. | |
**/ | |
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |
- u16 *data, bool read) | |
+ u16 *data, bool read) | |
{ | |
s32 ret_val; | |
- u32 addr_reg = 0; | |
- u32 data_reg = 0; | |
+ u32 addr_reg; | |
+ u32 data_reg; | |
/* This takes care of the difference with desktop vs mobile phy */ | |
- addr_reg = (hw->phy.type == e1000_phy_82578) ? | |
- I82578_ADDR_REG : I82577_ADDR_REG; | |
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ? | |
+ I82578_ADDR_REG : I82577_ADDR_REG); | |
data_reg = addr_reg + 1; | |
/* All operations in this function are phy address 2 */ | |
@@ -3137,7 +3022,7 @@ | |
ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); | |
if (ret_val) { | |
e_dbg("Could not write the Address Offset port register\n"); | |
- goto out; | |
+ return ret_val; | |
} | |
/* Read or write the data value next */ | |
@@ -3146,12 +3031,9 @@ | |
else | |
ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); | |
- if (ret_val) { | |
+ if (ret_val) | |
e_dbg("Could not access the Data port register\n"); | |
- goto out; | |
- } | |
-out: | |
return ret_val; | |
} | |
@@ -3172,39 +3054,35 @@ | |
u16 data; | |
if (hw->phy.type != e1000_phy_82578) | |
- goto out; | |
+ return 0; | |
/* Do not apply workaround if in PHY loopback bit 14 set */ | |
- e1e_rphy(hw, PHY_CONTROL, &data); | |
- if (data & PHY_CONTROL_LB) | |
- goto out; | |
+ e1e_rphy(hw, MII_BMCR, &data); | |
+ if (data & BMCR_LOOPBACK) | |
+ return 0; | |
/* check if link is up and at 1Gbps */ | |
ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- data &= BM_CS_STATUS_LINK_UP | | |
- BM_CS_STATUS_RESOLVED | | |
- BM_CS_STATUS_SPEED_MASK; | |
- | |
- if (data != (BM_CS_STATUS_LINK_UP | | |
- BM_CS_STATUS_RESOLVED | | |
- BM_CS_STATUS_SPEED_1000)) | |
- goto out; | |
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | | |
+ BM_CS_STATUS_SPEED_MASK); | |
+ | |
+ if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | | |
+ BM_CS_STATUS_SPEED_1000)) | |
+ return 0; | |
- mdelay(200); | |
+ msleep(200); | |
/* flush the packets in the fifo buffer */ | |
- ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | | |
- HV_MUX_DATA_CTRL_FORCE_SPEED); | |
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, | |
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC | | |
+ HV_MUX_DATA_CTRL_FORCE_SPEED)); | |
if (ret_val) | |
- goto out; | |
- | |
- ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); | |
+ return ret_val; | |
-out: | |
- return ret_val; | |
+ return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); | |
} | |
/** | |
@@ -3224,9 +3102,9 @@ | |
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); | |
if (!ret_val) | |
- phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) | |
- ? e1000_rev_polarity_reversed | |
- : e1000_rev_polarity_normal; | |
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) | |
+ ? e1000_rev_polarity_reversed | |
+ : e1000_rev_polarity_normal); | |
return ret_val; | |
} | |
@@ -3244,41 +3122,34 @@ | |
u16 phy_data; | |
bool link; | |
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | |
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | |
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | |
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
udelay(1); | |
if (phy->autoneg_wait_to_complete) { | |
e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- PHY_FORCE_LIMIT, | |
- 100000, | |
- &link); | |
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
+ 100000, &link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (!link) | |
e_dbg("Link taking longer than expected.\n"); | |
/* Try once more */ | |
- ret_val = e1000e_phy_has_link_generic(hw, | |
- PHY_FORCE_LIMIT, | |
- 100000, | |
- &link); | |
- if (ret_val) | |
- goto out; | |
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | |
+ 100000, &link); | |
} | |
-out: | |
return ret_val; | |
} | |
@@ -3300,51 +3171,47 @@ | |
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
if (!link) { | |
e_dbg("Phy info is only valid if link is up\n"); | |
- ret_val = -E1000_ERR_CONFIG; | |
- goto out; | |
+ return -E1000_ERR_CONFIG; | |
} | |
phy->polarity_correction = true; | |
ret_val = e1000_check_polarity_82577(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; | |
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); | |
if ((data & I82577_PHY_STATUS2_SPEED_MASK) == | |
I82577_PHY_STATUS2_SPEED_1000MBPS) { | |
ret_val = hw->phy.ops.get_cable_length(hw); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); | |
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) | |
- ? e1000_1000t_rx_status_ok | |
- : e1000_1000t_rx_status_not_ok; | |
+ phy->local_rx = (data & LPA_1000LOCALRXOK) | |
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; | |
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) | |
- ? e1000_1000t_rx_status_ok | |
- : e1000_1000t_rx_status_not_ok; | |
+ phy->remote_rx = (data & LPA_1000REMRXOK) | |
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; | |
} else { | |
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | |
phy->local_rx = e1000_1000t_rx_status_undefined; | |
phy->remote_rx = e1000_1000t_rx_status_undefined; | |
} | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
/** | |
@@ -3362,16 +3229,15 @@ | |
ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); | |
if (ret_val) | |
- goto out; | |
+ return ret_val; | |
- length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> | |
- I82577_DSTATUS_CABLE_LENGTH_SHIFT; | |
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> | |
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT); | |
if (length == E1000_CABLE_LENGTH_UNDEFINED) | |
- ret_val = -E1000_ERR_PHY; | |
+ return -E1000_ERR_PHY; | |
phy->cable_length = length; | |
-out: | |
- return ret_val; | |
+ return 0; | |
} | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: phy.h | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: ptp.c | |
Only in /home/arch/linux/drivers/net/ethernet/intel/e1000e: regs.h |
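
Note (not part of the diff above): most hunks follow two mechanical patterns. Driver-local PHY register names (PHY_CONTROL, PHY_STATUS, PHY_1000T_CTRL, MII_CR_*, NWAY_AR_*) are replaced by the generic <linux/mii.h> constants (MII_BMCR, MII_BMSR, MII_CTRL1000, BMCR_*, ADVERTISE_*), and single-exit "goto out" error handling is flattened into early returns where no unwind work is needed. The following is a minimal standalone C sketch of the second pattern only; read_reg() is a hypothetical stand-in for e1e_rphy()/e1e_wphy() and is not part of the driver.

/* Illustrative sketch only -- mirrors the goto-out -> early-return refactor
 * seen throughout the hunks above. Hypothetical helper, not driver code. */
#include <stdio.h>

static int read_reg(int reg, unsigned short *val)
{
	(void)reg;
	*val = 0x1234;	/* pretend the register read succeeded */
	return 0;	/* 0 = success, negative = error, as in the driver */
}

/* Old style (VyOS/3.8 side): single exit point via a cleanup label. */
static int get_status_old(unsigned short *status)
{
	int ret_val;

	ret_val = read_reg(1, status);
	if (ret_val)
		goto out;

	ret_val = read_reg(10, status);
out:
	return ret_val;
}

/* New style (v3.14 side): return as soon as the error is known. */
static int get_status_new(unsigned short *status)
{
	int ret_val;

	ret_val = read_reg(1, status);
	if (ret_val)
		return ret_val;

	return read_reg(10, status);
}

int main(void)
{
	unsigned short s;

	printf("old: %d\n", get_status_old(&s));
	printf("new: %d\n", get_status_new(&s));
	return 0;
}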