
[Xen-devel] [PATCH 22/22] Add netchannel2 VMQ support to an old version of the ixgbe driver.



This is a bit of a mess and isn't really meant to be applied as-is,
but it might be useful for testing.

The VMQ patch I have is against version 1.3.56.5 of the driver,
whereas the current 2.6.27 tree has version 2.0.34.3.  I don't
currently have access to any VMQ-capable hardware, and won't be at
Citrix long enough to acquire any, so this patch just rolls the driver
back to 1.3.56.5 and adds VMQ support to that.

The original VMQ patch was

Signed-off-by: Mitch Williams <mitch.a.williams@xxxxxxxxx>

My only contribution was to run combinediff, but FWIW that's

Signed-off-by: Steven Smith <steven.smith@xxxxxxxxxx>
---
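For reference, the combining mentioned above was done with combinediff
from patchutils.  A rough sketch of the invocation, with hypothetical
patch file names (the actual file names aren't part of this mail):

  # hypothetical names: the rollback diff plus Mitch's original VMQ diff
  combinediff ixgbe-rollback-to-1.3.56.5.diff ixgbe-vmq.diff \
      > ixgbe-netchannel2-vmq.diff
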
 drivers/net/ixgbe/Makefile          |    4 +-
 drivers/net/ixgbe/ixgbe.h           |  220 +-
 drivers/net/ixgbe/ixgbe_82598.c     |  487 ++--
 drivers/net/ixgbe/ixgbe_82599.c     | 2626 --------------------
 drivers/net/ixgbe/ixgbe_api.c       |  169 +--
 drivers/net/ixgbe/ixgbe_api.h       |   57 +-
 drivers/net/ixgbe/ixgbe_common.c    |  622 ++----
 drivers/net/ixgbe/ixgbe_common.h    |   12 +-
 drivers/net/ixgbe/ixgbe_dcb.c       |   19 +-
 drivers/net/ixgbe/ixgbe_dcb.h       |   28 +-
 drivers/net/ixgbe/ixgbe_dcb_82598.c |    7 +-
 drivers/net/ixgbe/ixgbe_dcb_82598.h |    2 +-
 drivers/net/ixgbe/ixgbe_dcb_82599.c |  508 ----
 drivers/net/ixgbe/ixgbe_dcb_82599.h |  125 -
 drivers/net/ixgbe/ixgbe_dcb_nl.c    |  555 +-----
 drivers/net/ixgbe/ixgbe_ethtool.c   |  561 ++----
 drivers/net/ixgbe/ixgbe_main.c      | 4486 +++++++++++++----------------------
 drivers/net/ixgbe/ixgbe_osdep.h     |   15 +-
 drivers/net/ixgbe/ixgbe_param.c     |  524 +----
 drivers/net/ixgbe/ixgbe_phy.c       |  792 +------
 drivers/net/ixgbe/ixgbe_phy.h       |   27 +-
 drivers/net/ixgbe/ixgbe_type.h      |  938 +--------
 drivers/net/ixgbe/kcompat.c         |  195 +--
 drivers/net/ixgbe/kcompat_ethtool.c |   10 +-
 drivers/xen/netchannel2/vmq.c       |    3 +
 25 files changed, 2457 insertions(+), 10535 deletions(-)
 delete mode 100644 drivers/net/ixgbe/ixgbe_82599.c
 delete mode 100644 drivers/net/ixgbe/ixgbe_dcb_82599.c
 delete mode 100644 drivers/net/ixgbe/ixgbe_dcb_82599.h

diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index aa7eaac..afc940d 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,8 +33,8 @@
 obj-$(CONFIG_IXGBE) += ixgbe.o
 
 CFILES = ixgbe_main.c ixgbe_common.c ixgbe_api.c ixgbe_param.c \
-         ixgbe_ethtool.c kcompat.c ixgbe_82598.c ixgbe_82599.c \
-         ixgbe_dcb.c ixgbe_dcb_nl.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c \
+         ixgbe_ethtool.c kcompat.c ixgbe_82598.c \
+         ixgbe_dcb.c ixgbe_dcb_nl.c ixgbe_dcb_82598.c \
          ixgbe_phy.c ixgbe_sysfs.c
 
 ixgbe-objs := $(CFILES:.c=.o)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index f6c6d26..d76fd88 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,9 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+#include <linux/netvmq.h>
+#endif
 
 #ifdef SIOCETHTOOL
 #include <linux/ethtool.h>
@@ -50,12 +53,20 @@
 
 #include "ixgbe_dcb.h"
 
-
 #include "kcompat.h"
 
-
 #include "ixgbe_api.h"
 
+#define IXGBE_NO_INET_LRO
+#ifndef IXGBE_NO_LRO
+#if defined(CONFIG_INET_LRO) || defined(CONFIG_INET_LRO_MODULE)
+#include <linux/inet_lro.h>
+#define IXGBE_MAX_LRO_DESCRIPTORS                 8
+#undef IXGBE_NO_INET_LRO
+#define IXGBE_NO_LRO
+#endif
+#endif /* IXGBE_NO_LRO */
+
 #define PFX "ixgbe: "
 #define DPRINTK(nlevel, klevel, fmt, args...) \
        ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
@@ -88,17 +99,14 @@
 #define IXGBE_RXBUFFER_128   128    /* Used for packet split */
 #define IXGBE_RXBUFFER_256   256    /* Used for packet split */
 #define IXGBE_RXBUFFER_2048  2048
-#define IXGBE_RXBUFFER_4096  4096
-#define IXGBE_RXBUFFER_8192  8192
-#define IXGBE_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 
 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
-#if defined(IXGBE_DCB) || defined(IXGBE_RSS) || \
-    defined(IXGBE_VMDQ)
-#define IXGBE_MQ
+#if defined(CONFIG_IXGBE_DCB) || defined(CONFIG_IXGBE_RSS) || \
+    defined(CONFIG_IXGBE_VMDQ)
+#define CONFIG_IXGBE_MQ
 #endif
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -108,8 +116,6 @@
 #define IXGBE_TX_FLAGS_VLAN            (u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO             (u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4            (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE            (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO             (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT      16
@@ -121,34 +127,37 @@
 struct ixgbe_lro_stats {
        u32 flushed;
        u32 coal;
-       u32 recycled;
 };
 
 struct ixgbe_lro_desc {
        struct  hlist_node lro_node;
        struct  sk_buff *skb;
+       struct  sk_buff *last_skb;
+       int     timestamp;
+       u32   tsval;
+       u32   tsecr;
        u32   source_ip;
        u32   dest_ip;
-       u16   source_port;
-       u16   dest_port;
-       u16   vlan_tag;
-       u16   len;
        u32   next_seq;
        u32   ack_seq;
        u16   window;
+       u16   source_port;
+       u16   dest_port;
+       u16   append_cnt;
        u16   mss;
-       u16   opt_bytes;
-       u16   psh:1;
-       u32   tsval;
-       u32   tsecr;
-       u32   append_cnt;
+       u32   data_size;        /*TCP data size*/
+       u16   vlan_tag;
+};
+
+struct ixgbe_lro_info {
+       struct ixgbe_lro_stats stats;
+       int max;                /*Maximum number of packet to coalesce.*/
 };
 
 struct ixgbe_lro_list {
        struct hlist_head active;
        struct hlist_head free;
        int active_cnt;
-       struct ixgbe_lro_stats stats;
 };
 
 #endif /* IXGBE_NO_LRO */
@@ -177,18 +186,17 @@ struct ixgbe_queue_stats {
 
 struct ixgbe_ring {
        void *desc;                     /* descriptor ring memory */
+       dma_addr_t dma;                 /* phys. address of descriptor ring */
+       unsigned int size;              /* length in bytes */
+       unsigned int count;             /* amount of descriptors */
+       unsigned int next_to_use;
+       unsigned int next_to_clean;
+
+       int queue_index; /* needed for multiqueue queue management */
        union {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
-       u8 atr_sample_rate;
-       u8 atr_count;
-       u16 count;                      /* amount of descriptors */
-       u16 rx_buf_len;
-       u16 next_to_use;
-       u16 next_to_clean;
-
-       u8 queue_index; /* needed for multiqueue queue management */
 
        u16 head;
        u16 tail;
@@ -196,43 +204,47 @@ struct ixgbe_ring {
        unsigned int total_bytes;
        unsigned int total_packets;
 
+       u16 reg_idx; /* holds the special value that gets the hardware register
+                     * offset associated with this ring, which is different
+                     * for DCB and RSS modes */
+
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        /* cpu for tx queue */
        int cpu;
 #endif
-       u16 work_limit;                /* max work per interrupt */
-       u16 reg_idx;                    /* holds the special value that gets the
-                                        * hardware register offset associated
-                                        * with this ring, which is different
-                                        * for DCB and RSS modes */
 
        struct ixgbe_queue_stats stats;
-       unsigned long reinit_state;
-       u64 rsc_count;                 /* stat for coalesced packets */
-       unsigned int size;              /* length in bytes */
-       dma_addr_t dma;                 /* phys. address of descriptor ring */
-};
-
-enum ixgbe_ring_f_enum {
-       RING_F_NONE = 0,
-       RING_F_DCB,
-       RING_F_VMDQ,
-       RING_F_RSS,
-       RING_F_FDIR,
-       RING_F_ARRAY_SIZE      /* must be last in enum set */
+       u16 v_idx; /* maps directly to the index for this ring in the hardware
+                  * vector array, can also be used for finding the bit in EICR
+                  * and friends that represents the vector for this ring */
+#ifndef IXGBE_NO_LRO
+       /* LRO list for rx queue */
+       struct ixgbe_lro_list *lrolist;
+#endif
+#ifndef IXGBE_NO_INET_LRO
+       struct net_lro_mgr  lro_mgr;
+       bool lro_used;
+#endif
+       u16 work_limit;                /* max work per interrupt */
+       u16 rx_buf_len;
+       u8 mac_addr[ETH_ALEN];
+       u8 active;
+       u8 allocated;
 };
 
+#define RING_F_DCB  0
+#define RING_F_VMDQ 1
+#define RING_F_RSS  2
 #define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
-#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
+#define IXGBE_MAX_VMDQ_INDICES 16
 struct ixgbe_ring_feature {
        int indices;
        int mask;
 };
 
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
+#define MAX_RX_QUEUES 64
+#define MAX_TX_QUEUES 32
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                                ? 8 : 1)
@@ -243,9 +255,6 @@ struct ixgbe_ring_feature {
  */
 struct ixgbe_q_vector {
        struct ixgbe_adapter *adapter;
-       unsigned int v_idx; /* index of q_vector within array, also used for
-                            * finding the bit in EICR and friends that
-                            * represents the vector for this ring */
 #ifdef CONFIG_IXGBE_NAPI
        struct napi_struct napi;
 #endif
@@ -256,19 +265,14 @@ struct ixgbe_q_vector {
        u8 tx_itr;
        u8 rx_itr;
        u32 eitr;
-#ifndef IXGBE_NO_LRO
-       struct ixgbe_lro_list *lrolist;   /* LRO list for queue vector*/
-#endif
-       char name[IFNAMSIZ + 9];
 };
 
 
 /* Helper macros to switch between ints/sec and what the register uses.
- * And yes, it's the same math going both ways.  The lowest value
- * supported by all of the ixgbe hardware is 8.
+ * And yes, it's the same math going both ways.
  */
 #define EITR_INTS_PER_SEC_TO_REG(_eitr) \
-       ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+       ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 0)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
 #define IXGBE_DESC_UNUSED(R) \
@@ -295,21 +299,9 @@ struct ixgbe_q_vector {
 #define OTHER_VECTOR 1
 #define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
 
-#define IXGBE_MAX_MSIX_VECTORS_82599 64
-#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
-#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16
-#define IXGBE_MAX_MSIX_VECTORS_82598 18
-
-/*
- * Only for array allocations in our adapter struct.  On 82598, there will be
- * unused entries in the array, but that's not a big deal.  Also, in 82599,
- * we can actually assign 64 queue vectors based on our extended-extended
- * interrupt registers.  This is different than 82598, which is limited to 16.
- */
-#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599
-#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599
-
+#define MAX_MSIX_Q_VECTORS 16
 #define MIN_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 
 /* board specific private data structure */
@@ -320,11 +312,11 @@ struct ixgbe_adapter {
 #endif
        u16 bd_number;
        struct work_struct reset_task;
-       struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+       struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+       char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 dcb_set_bitmap;
-       enum ixgbe_fc_mode last_lfc_mode;
 
        /* Interrupt Throttle Rate */
        u32 itr_setting;
@@ -345,24 +337,21 @@ struct ixgbe_adapter {
        /* RX */
        struct ixgbe_ring *rx_ring;     /* One per active queue */
        int num_rx_queues;
-       int num_rx_pools;               /* == num_rx_queues in 82598 */
-       int num_rx_queues_per_pool;     /* 1 if 82598, can be many if 82599 */
        u64 hw_csum_rx_error;
-       u64 hw_rx_no_dma_resources;
        u64 hw_csum_rx_good;
        u64 non_eop_descs;
 #ifndef CONFIG_IXGBE_NAPI
        u64 rx_dropped_backlog;         /* count drops from rx intr handler */
 #endif
        int num_msix_vectors;
-       int max_msix_q_vectors;         /* true count of q_vectors for device */
-       struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+       struct ixgbe_ring_feature ring_feature[3];
        struct msix_entry *msix_entries;
 #ifdef IXGBE_TCP_TIMER
        irqreturn_t (*msix_handlers[MAX_MSIX_COUNT])(int irq, void *data,
                                                     struct pt_regs *regs);
 #endif
 
+       u64 rx_hdr_split;
        u32 alloc_rx_page_failed;
        u32 alloc_rx_buff_failed;
 
@@ -384,7 +373,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
 #define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
 #define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
-#define IXGBE_FLAG_DCA_ENABLED_DATA             (u32)(1 << 12)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
 #define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
 #define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14)
 #define IXGBE_FLAG_DCB_CAPABLE                  (u32)(1 << 15)
@@ -395,19 +384,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 27)
-       u32 flags2;
-#ifndef IXGBE_NO_HW_RSC
-#define IXGBE_FLAG2_RSC_CAPABLE                  (u32)(1)
-#define IXGBE_FLAG2_RSC_ENABLED                  (u32)(1 << 1)
-#endif /* IXGBE_NO_HW_RSC */
-#ifndef IXGBE_NO_LRO
-#define IXGBE_FLAG2_SWLRO_ENABLED                (u32)(1 << 2)
-#endif /* IXGBE_NO_LRO */
-#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE        (u32)(1 << 3)
+
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 
@@ -416,7 +393,7 @@ struct ixgbe_adapter {
        struct pci_dev *pdev;
        struct net_device_stats net_stats;
 #ifndef IXGBE_NO_LRO
-       struct ixgbe_lro_stats lro_stats;
+       struct ixgbe_lro_info lro_data;
 #endif
 
 #ifdef ETHTOOL_TEST
@@ -433,15 +410,23 @@ struct ixgbe_adapter {
        u32 lli_port;
        u32 lli_size;
        u64 lli_int;
-       u32 lli_etype;
-       u32 lli_vlan_pri;
-#endif /* IXGBE_NO_LLI */
+#endif
        /* Interrupt Throttle Rate */
        u32 eitr_param;
 
        unsigned long state;
        u32 *config_space;
        u64 tx_busy;
+#ifndef IXGBE_NO_INET_LRO
+       unsigned int lro_max_aggr;
+       unsigned int lro_aggregated;
+       unsigned int lro_flushed;
+       unsigned int lro_no_desc;
+#endif
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       struct net_vmq *vmq;
+       u32 rx_queues_allocated;
+#endif
        unsigned int tx_ring_count;
        unsigned int rx_ring_count;
 
@@ -452,41 +437,19 @@ struct ixgbe_adapter {
        struct work_struct watchdog_task;
        struct work_struct sfp_task;
        struct timer_list sfp_timer;
-       struct work_struct multispeed_fiber_task;
-       struct work_struct sfp_config_module_task;
-       u64 flm;
-       u32 fdir_pballoc;
-       u32 atr_sample_rate;
-       spinlock_t fdir_perfect_lock;
-       struct work_struct fdir_reinit_task;
-       u64 rsc_count;
-       u32 wol;
-       u16 eeprom_version;
-       bool netdev_registered;
-       char lsc_int_name[IFNAMSIZ + 9];
-#ifdef IXGBE_TCP_TIMER
-       char tcp_timer_name[IFNAMSIZ + 9];
-#endif
 };
 
 enum ixbge_state_t {
        __IXGBE_TESTING,
        __IXGBE_RESETTING,
        __IXGBE_DOWN,
-       __IXGBE_FDIR_INIT_DONE,
        __IXGBE_SFP_MODULE_NOT_FOUND
 };
 
-#ifdef CONFIG_DCB
-extern struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                             struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max);
-#endif
 
 /* needed by ixgbe_main.c */
 extern int ixgbe_validate_mac_addr(u8 *mc_addr);
 extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
-extern void ixgbe_assign_netdev_ops(struct net_device *netdev);
 
 /* needed by ixgbe_ethtool.c */
 extern char ixgbe_driver_name[];
@@ -502,8 +465,10 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_free_rx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+
+/* needed by ixgbe_dcb_nl.c */
+extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
 
 #ifdef ETHTOOL_OPS_COMPAT
@@ -513,5 +478,12 @@ extern int ethtool_ioctl(struct ifreq *ifr);
 extern int ixgbe_dcb_netlink_register(void);
 extern int ixgbe_dcb_netlink_unregister(void);
 
+extern int ixgbe_sysfs_create(struct ixgbe_adapter *adapter);
+extern void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter);
+
+#ifdef CONFIG_IXGBE_NAPI
+extern void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
+#endif
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index ae6490b..1059032 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -30,12 +30,15 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                              ixgbe_link_speed *speed,
                                              bool *autoneg);
+s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg);
 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
@@ -56,37 +59,13 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
                          u32 vind, bool vlan_on);
 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                 u8 *eeprom_data);
 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
-
-
-/**
- *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
-       u32 msix_count = 18;
-
-       if (hw->mac.msix_vectors_from_pcie) {
-               msix_count = IXGBE_READ_PCIE_WORD(hw,
-                                                 IXGBE_PCIE_MSIX_82598_CAPS);
-               msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-               /* MSI-X count is zero-based in HW, so increment to give
-                * proper value */
-               msix_count++;
-       }
-       return msix_count;
-}
 
 /**
  *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
@@ -100,13 +79,11 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
        s32 ret_val;
+       u16 list_offset, data_offset;
 
        ret_val = ixgbe_init_phy_ops_generic(hw);
        ret_val = ixgbe_init_ops_generic(hw);
 
-       /* PHY */
-       phy->ops.init = &ixgbe_init_phy_ops_82598;
-
        /* MAC */
        mac->ops.reset_hw = &ixgbe_reset_hw_82598;
        mac->ops.get_media_type = &ixgbe_get_media_type_82598;
@@ -114,7 +91,10 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
                                    &ixgbe_get_supported_physical_layer_82598;
        mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
        mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
-       mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+
+       /* LEDs */
+       mac->ops.blink_led_start = &ixgbe_blink_led_start_82598;
+       mac->ops.blink_led_stop = &ixgbe_blink_led_stop_82598;
 
        /* RAR, Multicast, VLAN */
        mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
@@ -123,67 +103,42 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
        mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
 
        /* Flow Control */
-       mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+       mac->ops.setup_fc = &ixgbe_setup_fc_82598;
+
+       /* Link */
+       mac->ops.check_link = &ixgbe_check_mac_link_82598;
+       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+               mac->ops.setup_link_speed =
+                                    &ixgbe_setup_copper_link_speed_82598;
+               mac->ops.get_link_capabilities =
+                                    &ixgbe_get_copper_link_capabilities_82598;
+       } else {
+               mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+               mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598;
+               mac->ops.get_link_capabilities =
+                                      &ixgbe_get_link_capabilities_82598;
+       }
 
        mac->mcft_size       = 128;
        mac->vft_size        = 128;
        mac->num_rar_entries = 16;
        mac->max_tx_queues   = 32;
        mac->max_rx_queues   = 64;
-       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
 
        /* SFP+ Module */
        phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
 
-       /* Link */
-       mac->ops.check_link = &ixgbe_check_mac_link_82598;
-       mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
-       mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598;
-       mac->ops.get_link_capabilities =
-                              &ixgbe_get_link_capabilities_82598;
-
-       return ret_val;
-}
-
-/**
- *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
- *  @hw: pointer to hardware structure
- *
- *  Initialize any function pointers that were not able to be
- *  set during init_shared_code because the PHY/SFP type was
- *  not known.  Perform the SFP init if necessary.
- *
- **/
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
-{
-       struct ixgbe_mac_info *mac = &hw->mac;
-       struct ixgbe_phy_info *phy = &hw->phy;
-       s32 ret_val = 0;
-       u16 list_offset, data_offset;
-
-
-       /* Identify the PHY */
+       /* Call PHY identify routine to get the phy type */
        phy->ops.identify(hw);
 
-       /* Overwrite the link function pointers if copper PHY */
-       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
-               mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
-               mac->ops.setup_link_speed =
-                                    &ixgbe_setup_copper_link_speed_82598;
-               mac->ops.get_link_capabilities =
-                                 &ixgbe_get_copper_link_capabilities_generic;
-       }
-
+       /* PHY Init */
        switch (hw->phy.type) {
        case ixgbe_phy_tn:
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.get_firmware_version =
                             &ixgbe_get_phy_firmware_version_tnx;
                break;
-       case ixgbe_phy_aq:
-               phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_aq;
-               break;
        case ixgbe_phy_nl:
                phy->ops.reset = &ixgbe_reset_phy_nl;
 
@@ -226,19 +181,12 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                              bool *autoneg)
 {
        s32 status = 0;
-       u32 autoc = 0;
 
        /*
         * Determine link capabilities based on the stored value of AUTOC,
-        * which represents EEPROM defaults.  If AUTOC value has not been
-        * stored, use the current register value.
+        * which represents EEPROM defaults.
         */
-       if (hw->mac.orig_link_settings_stored)
-               autoc = hw->mac.orig_autoc;
-       else
-               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
-       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
        case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                *autoneg = false;
@@ -257,9 +205,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
        case IXGBE_AUTOC_LMS_KX4_AN:
        case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
                *speed = IXGBE_LINK_SPEED_UNKNOWN;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+               if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
                        *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
+               if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
                        *speed |= IXGBE_LINK_SPEED_1GB_FULL;
                *autoneg = true;
                break;
@@ -273,6 +221,38 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
 }
 
 /**
+ *  ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
+{
+       s32 status = IXGBE_ERR_LINK_SETUP;
+       u16 speed_ability;
+
+       *speed = 0;
+       *autoneg = true;
+
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     &speed_ability);
+
+       if (status == 0) {
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+       }
+
+       return status;
+}
+
+/**
  *  ixgbe_get_media_type_82598 - Determines media type
  *  @hw: pointer to hardware structure
  *
@@ -282,18 +262,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
        enum ixgbe_media_type media_type;
 
-       /* Detect if there is a copper PHY attached. */
-       if (hw->phy.type == ixgbe_phy_cu_unknown ||
-           hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq) {
-               media_type = ixgbe_media_type_copper;
-               goto out;
-       }
-
        /* Media type for I82598 is based on device ID */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82598:
-       case IXGBE_DEV_ID_82598_BX:
                /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
                break;
@@ -314,7 +285,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
                media_type = ixgbe_media_type_unknown;
                break;
        }
-out:
+
        return media_type;
 }
 
@@ -332,17 +303,6 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
        u32 rmcs_reg;
        u32 reg;
 
-#ifdef CONFIG_DCB
-       if (hw->fc.requested_mode == ixgbe_fc_pfc)
-               goto out;
-
-#endif /* CONFIG_DCB */
-       /* Negotiate the fc mode to use */
-       ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
-               goto out;
-
-       /* Disable any previous flow control settings */
        fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
 
@@ -354,19 +314,14 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 0: Flow control is completely disabled
         * 1: Rx flow control is enabled (we can receive pause frames,
         *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
+        * 2:  Tx flow control is enabled (we can send pause frames but
         *     we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-        * 4: Priority Flow Control is enabled.
-#endif
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
        case ixgbe_fc_none:
-               /* Flow control is disabled by software override or autoneg.
-                * The code below will actually disable it in the HW.
-                */
+               /* Flow control completely disabled by software override. */
                break;
        case ixgbe_fc_rx_pause:
                /*
@@ -391,11 +346,6 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
                fctrl_reg |= IXGBE_FCTRL_RFCE;
                rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
                break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
                ret_val = -IXGBE_ERR_CONFIG;
@@ -403,8 +353,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
                break;
        }
 
-       /* Set 802.3x based flow control settings. */
-       fctrl_reg |= IXGBE_FCTRL_DPF;
+       /* Enable 802.3x based flow control settings. */
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
@@ -423,7 +372,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
        }
 
        /* Configure pause time (2 TCs per register) */
-       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
        if ((packetbuf_num & 1) == 0)
                reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
        else
@@ -437,6 +386,64 @@ out:
 }
 
 /**
+ *  ixgbe_setup_fc_82598 - Set up flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets up flow control.
+ **/
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+       s32 ret_val = 0;
+
+       /* Validate the packetbuf configuration */
+       if (packetbuf_num < 0 || packetbuf_num > 7) {
+               hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
+                         " 0-7\n", packetbuf_num);
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * Validate the water mark configuration.  Zero water marks are invalid
+        * because it causes the controller to just blast out fc packets.
+        */
+       if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+               hw_dbg(hw, "Invalid water mark configuration\n");
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * Validate the requested mode.  Strict IEEE mode does not allow
+        * ixgbe_fc_rx_pause because it will cause testing anomalies.
+        */
+       if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+               hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * 10gig parts do not have a word in the EEPROM to determine the
+        * default flow control setting, so we explicitly set it to full.
+        */
+       if (hw->fc.requested_mode == ixgbe_fc_default)
+               hw->fc.requested_mode = ixgbe_fc_full;
+
+       /*
+        * Save off the requested flow control mode for use later.  Depending
+        * on the link partner's capabilities, we may or may not use this mode.
+        */
+       hw->fc.current_mode = hw->fc.requested_mode;
+
+
+       ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
+
+out:
+       return ret_val;
+}
+
+/**
  *  ixgbe_setup_mac_link_82598 - Configures MAC link settings
  *  @hw: pointer to hardware structure
  *
@@ -475,6 +482,9 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
                }
        }
 
+       /* Set up flow control */
+       status = ixgbe_setup_fc_82598(hw, 0);
+
        /* Add delay to filter out noises during initial link setup */
        msleep(50);
 
@@ -562,11 +572,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
        else
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
 
-       /* if link is down, zero out the current_mode */
-       if (*link_up == false) {
-               hw->fc.current_mode = ixgbe_fc_none;
-               hw->fc.fc_was_autonegged = false;
-       }
 out:
        return 0;
 }
@@ -636,10 +641,22 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
 {
        s32 status;
+       u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc = curr_autoc;
 
        /* Restart autonegotiation on PHY */
        status = hw->phy.ops.setup_link(hw);
 
+       /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+       autoc &= ~IXGBE_AUTOC_LMS_MASK;
+       autoc |= IXGBE_AUTOC_LMS_KX4_AN;
+
+       autoc &= ~(IXGBE_AUTOC_1G_PMA_PMD_MASK | IXGBE_AUTOC_10G_PMA_PMD_MASK);
+       autoc |= (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+
+       if (autoc != curr_autoc)
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+
        /* Set up MAC */
        ixgbe_setup_mac_link_82598(hw);
 
@@ -661,10 +678,23 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
                                                bool autoneg_wait_to_complete)
 {
        s32 status;
+       u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc = curr_autoc;
 
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
                                              autoneg_wait_to_complete);
+
+       /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+       autoc &= ~IXGBE_AUTOC_LMS_MASK;
+       autoc |= IXGBE_AUTOC_LMS_KX4_AN;
+
+       autoc &= ~(IXGBE_AUTOC_1G_PMA_PMD_MASK | IXGBE_AUTOC_10G_PMA_PMD_MASK);
+       autoc |= (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+
+       if (autoc != curr_autoc)
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+
        /* Set up MAC */
        ixgbe_setup_mac_link_82598(hw);
 
@@ -682,7 +712,6 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 {
        s32 status = 0;
-       s32 phy_status = 0;
        u32 ctrl;
        u32 gheccr;
        u32 i;
@@ -726,26 +755,14 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
        }
 
        /* Reset PHY */
-       if (hw->phy.reset_disable == false) {
-               /* PHY ops must be identified and initialized prior to reset */
-
-               /* Init PHY and function pointers, perform SFP setup */
-               phy_status = hw->phy.ops.init(hw);
-               if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-                       goto reset_hw_out;
-               else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
-                       goto no_phy_reset;
-
+       if (hw->phy.reset_disable == false)
                hw->phy.ops.reset(hw);
-       }
 
-no_phy_reset:
        /*
         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
+       if (ixgbe_disable_pcie_master(hw) != 0) {
                status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
                hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
        }
@@ -785,23 +802,14 @@ no_phy_reset:
        if (hw->mac.orig_link_settings_stored == false) {
                hw->mac.orig_autoc = autoc;
                hw->mac.orig_link_settings_stored = true;
-       } else if (autoc != hw->mac.orig_autoc)
+       }
+    else if (autoc != hw->mac.orig_autoc) {
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+       }
 
        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
 
-       /*
-        * Store MAC address from RAR0, clear receive address registers, and
-        * clear the multicast table
-        */
-       hw->mac.ops.init_rx_addrs(hw);
-
-
-
-reset_hw_out:
-       if (phy_status != 0)
-               status = phy_status;
        return status;
 }
 
@@ -918,6 +926,61 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
 }
 
 /**
+ *  ixgbe_blink_led_start_82598 - Blink LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ **/
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
+{
+       ixgbe_link_speed speed = 0;
+       bool link_up = 0;
+       u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+       /*
+        * Link must be up to auto-blink the LEDs on the 82598EB MAC;
+        * force it if link is down.
+        */
+       hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+       if (!link_up) {
+               autoc_reg |= IXGBE_AUTOC_FLU;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+               msleep(10);
+       }
+
+       led_reg &= ~IXGBE_LED_MODE_MASK(index);
+       led_reg |= IXGBE_LED_BLINK(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/**
+ *  ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to stop blinking
+ **/
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
+{
+       u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+       autoc_reg &= ~IXGBE_AUTOC_FLU;
+       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+       led_reg &= ~IXGBE_LED_MODE_MASK(index);
+       led_reg &= ~IXGBE_LED_BLINK(index);
+       led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/**
  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
  *  @hw: pointer to hardware structure
  *  @reg: analog register to read
@@ -1030,56 +1093,33 @@ out:
 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 {
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-       u16 ext_ability = 0;
-
-       hw->phy.ops.identify(hw);
-
-       /* Copper PHY must be checked before AUTOC LMS to determine correct
-        * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-       if (hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_cu_unknown) {
-               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
-               IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
-               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-               goto out;
-       }
 
-       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-       case IXGBE_AUTOC_LMS_1G_AN:
-       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-               else
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598:
+               /* Default device ID is mezzanine card KX/KX4 */
+               physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+                                 IXGBE_PHYSICAL_LAYER_1000BASE_KX);
                break;
-       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-               if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-               else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               else /* XAUI */
-                       physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+       case IXGBE_DEV_ID_82598EB_CX4:
+       case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
                break;
-       case IXGBE_AUTOC_LMS_KX4_AN:
-       case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
                break;
-       default:
+       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
                break;
-       }
-
-       if (hw->phy.type == ixgbe_phy_nl) {
+       case IXGBE_DEV_ID_82598EB_XF_LR:
+               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+               break;
+       case IXGBE_DEV_ID_82598AT:
+               physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
+                                 IXGBE_PHYSICAL_LAYER_1000BASE_T);
+               break;
+       case IXGBE_DEV_ID_82598EB_SFP_LOM:
                hw->phy.ops.identify_sfp(hw);
 
                switch (hw->phy.sfp_type) {
@@ -1096,57 +1136,12 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
                        physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
                        break;
                }
-       }
-
-       switch (hw->device_id) {
-       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-               break;
-       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-               break;
-       case IXGBE_DEV_ID_82598EB_XF_LR:
-               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
                break;
+
        default:
+               physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
                break;
        }
 
-out:
        return physical_layer;
 }
-
-/**
- *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
- *  port devices.
- *  @hw: pointer to the HW structure
- *
- *  Calls common function and corrects issue with some single port devices
- *  that enable LAN1 but not LAN0.
- **/
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
-{
-       struct ixgbe_bus_info *bus = &hw->bus;
-       u16 pci_gen, pci_ctrl2;
-
-       ixgbe_set_lan_id_multi_port_pcie(hw);
-
-       /* check if LAN0 is disabled */
-       hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
-       if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
-
-               hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
-
-               /* if LAN0 is completely disabled force function to 0 */
-               if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
-                   !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
-                   !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
-
-                       bus->func = 0;
-               }
-       }
-}
-
-
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
deleted file mode 100644
index 8040d0b..0000000
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ /dev/null
@@ -1,2626 +0,0 @@
-/*******************************************************************************
-
-  Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@xxxxxxxxxxxxxxxxxxxxx>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_type.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed,
-                                      bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
-                                     bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
-                               ixgbe_link_speed *speed,
-                               bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed,
-                                     bool autoneg,
-                                     bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed speed,
-                                               bool autoneg,
-                                               bool autoneg_wait_to_complete);
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
-s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
-                         u32 vind, bool vlan_on);
-s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
-                                        u16 *san_mac_offset);
-s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-
-
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
-{
-       struct ixgbe_mac_info *mac = &hw->mac;
-
-       if (hw->phy.multispeed_fiber) {
-               /* Set up dual speed SFP+ support */
-               mac->ops.setup_link =
-                         &ixgbe_setup_mac_link_multispeed_fiber;
-               mac->ops.setup_link_speed =
-                         &ixgbe_setup_mac_link_speed_multispeed_fiber;
-       } else {
-               mac->ops.setup_link =
-                       &ixgbe_setup_mac_link_82599;
-               mac->ops.setup_link_speed =
-                         &ixgbe_setup_mac_link_speed_82599;
-       }
-}
-
-/**
- *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
- *  @hw: pointer to hardware structure
- *
- *  Initialize any function pointers that were not able to be
- *  set during init_shared_code because the PHY/SFP type was
- *  not known.  Perform the SFP init if necessary.
- *
- **/
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
-{
-       struct ixgbe_mac_info *mac = &hw->mac;
-       struct ixgbe_phy_info *phy = &hw->phy;
-       s32 ret_val = 0;
-
-       /* Identify the PHY or SFP module */
-       ret_val = phy->ops.identify(hw);
-       if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               goto init_phy_ops_out;
-
-       /* Setup function pointers based on detected SFP module and speeds */
-       ixgbe_init_mac_link_ops_82599(hw);
-       if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
-               hw->phy.ops.reset = NULL;
-
-       /* If copper media, overwrite with copper function pointers */
-       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
-               mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
-               mac->ops.setup_link_speed =
-                                    &ixgbe_setup_copper_link_speed_82599;
-               mac->ops.get_link_capabilities =
-                                 &ixgbe_get_copper_link_capabilities_generic;
-       }
-
-       /* Set necessary function pointers based on phy type */
-       switch (hw->phy.type) {
-       case ixgbe_phy_tn:
-               phy->ops.check_link = &ixgbe_check_phy_link_tnx;
-               phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
-               break;
-       case ixgbe_phy_aq:
-               phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_aq;
-               break;
-       default:
-               break;
-       }
-init_phy_ops_out:
-       return ret_val;
-}
-
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
-{
-       s32 ret_val = 0;
-       u16 list_offset, data_offset, data_value;
-
-       if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
-               ixgbe_init_mac_link_ops_82599(hw);
-
-               hw->phy.ops.reset = NULL;
-
-               ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-                                                             &data_offset);
-               if (ret_val != 0)
-                       goto setup_sfp_out;
-
-               /* PHY config will finish before releasing the semaphore */
-               ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-               if (ret_val != 0) {
-                       ret_val = IXGBE_ERR_SWFW_SYNC;
-                       goto setup_sfp_out;
-               }
-
-               hw->eeprom.ops.read(hw, ++data_offset, &data_value);
-               while (data_value != 0xffff) {
-                       IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
-                       IXGBE_WRITE_FLUSH(hw);
-                       hw->eeprom.ops.read(hw, ++data_offset, &data_value);
-               }
-               /* Now restart DSP by setting Restart_AN */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
-                   (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
-
-               /* Release the semaphore */
-               ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-               /* Delay obtaining semaphore again to allow FW access */
-               msleep(hw->eeprom.semaphore_delay);
-       }
-
-setup_sfp_out:
-       return ret_val;
-}
-
-/**
- *  ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
-{
-       u32 msix_count = 64;
-
-       if (hw->mac.msix_vectors_from_pcie) {
-               msix_count = IXGBE_READ_PCIE_WORD(hw,
-                                                 IXGBE_PCIE_MSIX_82599_CAPS);
-               msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-               /* MSI-X count is zero-based in HW, so increment to give
-                * proper value */
-               msix_count++;
-       }
-
-       return msix_count;
-}
-
-/**
- *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
- *  @hw: pointer to hardware structure
- *
- *  Initialize the function pointers and assign the MAC type for 82599.
- *  Does not touch the hardware.
- **/
-
-s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
-{
-       struct ixgbe_mac_info *mac = &hw->mac;
-       struct ixgbe_phy_info *phy = &hw->phy;
-       s32 ret_val;
-
-       ret_val = ixgbe_init_phy_ops_generic(hw);
-       ret_val = ixgbe_init_ops_generic(hw);
-
-       /* PHY */
-       phy->ops.identify = &ixgbe_identify_phy_82599;
-       phy->ops.init = &ixgbe_init_phy_ops_82599;
-
-       /* MAC */
-       mac->ops.reset_hw = &ixgbe_reset_hw_82599;
-       mac->ops.get_media_type = &ixgbe_get_media_type_82599;
-       mac->ops.get_supported_physical_layer =
-                                   &ixgbe_get_supported_physical_layer_82599;
-       mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
-       mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
-       mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
-       mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
-       mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599;
-       mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_82599;
-       mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
-
-       /* RAR, Multicast, VLAN */
-       mac->ops.set_vmdq = &ixgbe_set_vmdq_82599;
-       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82599;
-       mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_82599;
-       mac->rar_highwater = 1;
-       mac->ops.set_vfta = &ixgbe_set_vfta_82599;
-       mac->ops.clear_vfta = &ixgbe_clear_vfta_82599;
-       mac->ops.init_uta_tables = &ixgbe_init_uta_tables_82599;
-       mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
-
-       /* Link */
-       mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
-       mac->ops.check_link            = &ixgbe_check_mac_link_82599;
-       ixgbe_init_mac_link_ops_82599(hw);
-
-       mac->mcft_size        = 128;
-       mac->vft_size         = 128;
-       mac->num_rar_entries  = 128;
-       mac->max_tx_queues    = 128;
-       mac->max_rx_queues    = 128;
-       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
-
-
-       return ret_val;
-}
-
-/**
- *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @negotiation: true when autoneg or autotry is enabled
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed,
-                                      bool *negotiation)
-{
-       s32 status = 0;
-       u32 autoc = 0;
-
-       /*
-        * Determine link capabilities based on the stored value of AUTOC,
-        * which represents EEPROM defaults.  If AUTOC value has not
-        * been stored, use the current register values.
-        */
-       if (hw->mac.orig_link_settings_stored)
-               autoc = hw->mac.orig_autoc;
-       else
-               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
-       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-               *speed = IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = false;
-               break;
-
-       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-               *speed = IXGBE_LINK_SPEED_10GB_FULL;
-               *negotiation = false;
-               break;
-
-       case IXGBE_AUTOC_LMS_1G_AN:
-               *speed = IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = true;
-               break;
-
-       case IXGBE_AUTOC_LMS_10G_SERIAL:
-               *speed = IXGBE_LINK_SPEED_10GB_FULL;
-               *negotiation = false;
-               break;
-
-       case IXGBE_AUTOC_LMS_KX4_KX_KR:
-       case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
-               *speed = IXGBE_LINK_SPEED_UNKNOWN;
-               if (autoc & IXGBE_AUTOC_KR_SUPP)
-                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
-                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
-                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = true;
-               break;
-
-       case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
-               *speed = IXGBE_LINK_SPEED_100_FULL;
-               if (autoc & IXGBE_AUTOC_KR_SUPP)
-                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
-                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
-                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = true;
-               break;
-
-       case IXGBE_AUTOC_LMS_SGMII_1G_100M:
-               *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
-               *negotiation = false;
-               break;
-
-       default:
-               status = IXGBE_ERR_LINK_SETUP;
-               goto out;
-               break;
-       }
-
-       if (hw->phy.multispeed_fiber) {
-               *speed |= IXGBE_LINK_SPEED_10GB_FULL |
-                         IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = true;
-       }
-
-out:
-       return status;
-}
-
-/**
- *  ixgbe_get_media_type_82599 - Get media type
- *  @hw: pointer to hardware structure
- *
- *  Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
-{
-       enum ixgbe_media_type media_type;
-
-       /* Detect if there is a copper PHY attached. */
-       if (hw->phy.type == ixgbe_phy_cu_unknown ||
-           hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq) {
-               media_type = ixgbe_media_type_copper;
-               goto out;
-       }
-
-       switch (hw->device_id) {
-       case IXGBE_DEV_ID_82599_KX4:
-       case IXGBE_DEV_ID_82599_XAUI_LOM:
-               /* Default device ID is mezzanine card KX/KX4 */
-               media_type = ixgbe_media_type_backplane;
-               break;
-       case IXGBE_DEV_ID_82599_SFP:
-               media_type = ixgbe_media_type_fiber;
-               break;
-       default:
-               media_type = ixgbe_media_type_unknown;
-               break;
-       }
-out:
-       return media_type;
-}
-
-/**
- *  ixgbe_setup_mac_link_82599 - Setup MAC link settings
- *  @hw: pointer to hardware structure
- *
- *  Configures link settings based on values in the ixgbe_hw struct.
- *  Restarts the link.  Performs autonegotiation if needed.
- **/
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
-{
-       u32 autoc_reg;
-       u32 links_reg;
-       u32 i;
-       s32 status = 0;
-
-
-       /* Restart link */
-       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
-       /* Only poll for autoneg to complete if specified to do so */
-       if (hw->phy.autoneg_wait_to_complete) {
-               if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-                    IXGBE_AUTOC_LMS_KX4_KX_KR ||
-                   (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-                    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
-                   || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-                    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
-                       links_reg = 0; /* Just in case Autoneg time = 0 */
-                       for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
-                               links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-                               if (links_reg & IXGBE_LINKS_KX_AN_COMP)
-                                       break;
-                               msleep(100);
-                       }
-                       if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
-                               status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-                               hw_dbg(hw, "Autoneg did not complete.\n");
-                       }
-               }
-       }
-
-       /* Add delay to filter out noises during initial link setup */
-       msleep(50);
-
-       return status;
-}
-
-/**
- *  ixgbe_setup_mac_link_multispeed_fiber - Setup MAC link settings
- *  @hw: pointer to hardware structure
- *
- *  Configures link settings based on values in the ixgbe_hw struct.
- *  Restarts the link for multi-speed fiber at 1G speed, if link
- *  fails at 10G.
- *  Performs autonegotiation if needed.
- **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw)
-{
-       s32 status = 0;
-       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_82599_AUTONEG;
-       status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
-                                                      link_speed, true, true);
-       return status;
-}
-
-/**
- *  ixgbe_setup_mac_link_speed_multispeed_fiber - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg: true if autonegotiation enabled
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
-                                     bool autoneg_wait_to_complete)
-{
-       s32 status = 0;
-       ixgbe_link_speed link_speed;
-       ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-       u32 speedcnt = 0;
-       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-       u32 i = 0;
-       bool link_up = false;
-       bool negotiation;
-
-       /* Mask off requested but non-supported speeds */
-       status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
-       if (status != 0)
-               goto out;
-
-       speed &= link_speed;
-
-        /* Set autoneg_advertised value based on input link speed */
-       hw->phy.autoneg_advertised = 0;
-
-       if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-       if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
-       /*
-        * When the driver changes the link speeds that it can support,
-        * it sets autotry_restart to true to indicate that we need to
-        * initiate a new autotry session with the link partner.  To do
-        * so, we set the speed then disable and re-enable the tx laser, to
-        * alert the link partner that it also needs to restart autotry on its
-        * end.  This is consistent with true clause 37 autoneg, which also
-        * involves a loss of signal.
-        */
-
-       /*
-        * Try each speed one by one, highest priority first.  We do this in
-        * software because 10gb fiber doesn't support speed autonegotiation.
-        */
-       if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-               speedcnt++;
-               highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-               /* If we already have link at this speed, just jump out */
-               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-               if (status != 0)
-                       goto out;
-
-               if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
-                       goto out;
-
-               /* Set the module link speed */
-               esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-
-               /* Allow module to change analog characteristics (1G->10G) */
-               msleep(40);
-
-               status = ixgbe_setup_mac_link_speed_82599(
-                       hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
-                       autoneg_wait_to_complete);
-               if (status != 0)
-                       goto out;
-
-               /* Flap the tx laser if it has not already been done */
-               if (hw->mac.autotry_restart) {
-                       /* Disable tx laser; allow 100us to go dark per spec */
-                       esdp_reg |= IXGBE_ESDP_SDP3;
-                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-                       udelay(100);
-
-                       /* Enable tx laser; allow 2ms to light up per spec */
-                       esdp_reg &= ~IXGBE_ESDP_SDP3;
-                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-                       msleep(2);
-
-                       hw->mac.autotry_restart = false;
-               }
-
-               /* The controller may take up to 500ms at 10g to acquire link */
-               for (i = 0; i < 5; i++) {
-                       /* Wait for the link partner to also set speed */
-                       msleep(100);
-
-                       /* If we have link, just jump out */
-                       status = ixgbe_check_link(hw, &link_speed,
-                                                 &link_up, false);
-                       if (status != 0)
-                               goto out;
-
-                       if (link_up)
-                               goto out;
-               }
-       }
-
-       if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
-               speedcnt++;
-               if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
-                       highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
-               /* If we already have link at this speed, just jump out */
-               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-               if (status != 0)
-                       goto out;
-
-               if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
-                       goto out;
-
-               /* Set the module link speed */
-               esdp_reg &= ~IXGBE_ESDP_SDP5;
-               esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-
-               /* Allow module to change analog characteristics (10G->1G) */
-               msleep(40);
-
-               status = ixgbe_setup_mac_link_speed_82599(
-                       hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
-                       autoneg_wait_to_complete);
-               if (status != 0)
-                       goto out;
-
-               /* Flap the tx laser if it has not already been done */
-               if (hw->mac.autotry_restart) {
-                       /* Disable tx laser; allow 100us to go dark per spec */
-                       esdp_reg |= IXGBE_ESDP_SDP3;
-                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-                       udelay(100);
-
-                       /* Enable tx laser; allow 2ms to light up per spec */
-                       esdp_reg &= ~IXGBE_ESDP_SDP3;
-                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-                       msleep(2);
-
-                       hw->mac.autotry_restart = false;
-               }
-
-               /* Wait for the link partner to also set speed */
-               msleep(100);
-
-               /* If we have link, just jump out */
-               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-               if (status != 0)
-                       goto out;
-
-               if (link_up)
-                       goto out;
-       }
-
-       /*
-        * We didn't get link.  Configure back to the highest speed we tried,
-        * (if there was more than one).  We call ourselves back with just the
-        * single highest speed that the user requested.
-        */
-       if (speedcnt > 1)
-               status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
-                       highest_link_speed, autoneg, autoneg_wait_to_complete);
-
-out:
-       return status;
-}
-
-/**
- *  ixgbe_check_mac_link_82599 - Determine link and speed status
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @link_up: true when link is up
- *  @link_up_wait_to_complete: bool used to wait for link up or not
- *
- *  Reads the links register to determine if link is up and the current speed
- **/
-s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-                               bool *link_up, bool link_up_wait_to_complete)
-{
-       u32 links_reg;
-       u32 i;
-
-       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-       if (link_up_wait_to_complete) {
-               for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
-                       if (links_reg & IXGBE_LINKS_UP) {
-                               *link_up = true;
-                               break;
-                       } else {
-                               *link_up = false;
-                       }
-                       msleep(100);
-                       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-               }
-       } else {
-               if (links_reg & IXGBE_LINKS_UP)
-                       *link_up = true;
-               else
-                       *link_up = false;
-       }
-
-       if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-           IXGBE_LINKS_SPEED_10G_82599)
-               *speed = IXGBE_LINK_SPEED_10GB_FULL;
-       else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-                IXGBE_LINKS_SPEED_1G_82599)
-               *speed = IXGBE_LINK_SPEED_1GB_FULL;
-       else
-               *speed = IXGBE_LINK_SPEED_100_FULL;
-
-       /* if link is down, zero out the current_mode */
-       if (*link_up == false) {
-               hw->fc.current_mode = ixgbe_fc_none;
-               hw->fc.fc_was_autonegged = false;
-       }
-
-       return 0;
-}
-
-/**
- *  ixgbe_setup_mac_link_speed_82599 - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg: true if autonegotiation enabled
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
-                                     bool autoneg_wait_to_complete)
-{
-       s32 status = 0;
-       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-       u32 start_autoc = autoc;
-       u32 orig_autoc = 0;
-       u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
-       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-       u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-       u32 links_reg;
-       u32 i;
-       ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
-
-       /* Check to see if speed passed in is supported. */
-       status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
-       if (status != 0)
-               goto out;
-
-       speed &= link_capabilities;
-
-       if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
-               status = IXGBE_ERR_LINK_SETUP;
-               goto out;
-       }
-
-       /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
-       if (hw->mac.orig_link_settings_stored)
-               orig_autoc = hw->mac.orig_autoc;
-       else
-               orig_autoc = autoc;
-
-       if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
-                link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
-                link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
-               /* Set KX4/KX/KR support according to speed requested */
-               autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-               if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-                       if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
-                               autoc |= IXGBE_AUTOC_KX4_SUPP;
-                       if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
-                               autoc |= IXGBE_AUTOC_KR_SUPP;
-               if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-                       autoc |= IXGBE_AUTOC_KX_SUPP;
-       } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
-                (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
-                 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
-               /* Switch from 1G SFI to 10G SFI if requested */
-               if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
-                   (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
-                       autoc &= ~IXGBE_AUTOC_LMS_MASK;
-                       autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
-               }
-       } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
-                (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
-               /* Switch from 10G SFI to 1G SFI if requested */
-               if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
-                   (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
-                       autoc &= ~IXGBE_AUTOC_LMS_MASK;
-                       if (autoneg)
-                               autoc |= IXGBE_AUTOC_LMS_1G_AN;
-                       else
-                               autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
-               }
-       }
-
-       if (autoc != start_autoc) {
-
-               /* Restart link */
-               autoc |= IXGBE_AUTOC_AN_RESTART;
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
-
-               /* Only poll for autoneg to complete if specified to do so */
-               if (autoneg_wait_to_complete) {
-                       if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
-                           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
-                           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
-                               links_reg = 0; /*Just in case Autoneg time=0*/
-                               for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
-                                       links_reg =
-                                              IXGBE_READ_REG(hw, IXGBE_LINKS);
-                                       if (links_reg & IXGBE_LINKS_KX_AN_COMP)
-                                               break;
-                                       msleep(100);
-                               }
-                               if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
-                                       status =
-                                               IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-                                       hw_dbg(hw, "Autoneg did not complete.\n");
-                               }
-                       }
-               }
-
-               /* Add delay to filter out noises during initial link setup */
-               msleep(50);
-       }
-
-out:
-       return status;
-}
-
-/**
- *  ixgbe_setup_copper_link_82599 - Setup copper link settings
- *  @hw: pointer to hardware structure
- *
- *  Restarts the link on PHY and then MAC. Performs autonegotiation if needed.
- **/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw)
-{
-       s32 status;
-
-       /* Restart autonegotiation on PHY */
-       status = hw->phy.ops.setup_link(hw);
-
-       /* Set up MAC */
-       ixgbe_setup_mac_link_82599(hw);
-
-       return status;
-}
-
-/**
- *  ixgbe_setup_copper_link_speed_82599 - Set the PHY autoneg advertised field
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg: true if autonegotiation enabled
- *  @autoneg_wait_to_complete: true if waiting is needed to complete
- *
- *  Restarts link on PHY and MAC based on settings passed in.
- **/
-static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed speed,
-                                               bool autoneg,
-                                               bool autoneg_wait_to_complete)
-{
-       s32 status;
-
-       /* Setup the PHY according to input speed */
-       status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-                                             autoneg_wait_to_complete);
-       /* Set up MAC */
-       ixgbe_setup_mac_link_82599(hw);
-
-       return status;
-}
-/**
- *  ixgbe_reset_hw_82599 - Perform hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks
- *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
- *  reset.
- **/
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
-{
-       s32 status = 0;
-       u32 ctrl, ctrl_ext;
-       u32 i;
-       u32 autoc;
-       u32 autoc2;
-
-       /* Call adapter stop to disable tx/rx and clear interrupts */
-       hw->mac.ops.stop_adapter(hw);
-
-       /* PHY ops must be identified and initialized prior to reset */
-
-       /* Identify PHY and related function pointers */
-       status = hw->phy.ops.init(hw);
-
-       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               goto reset_hw_out;
-
-
-       /* Setup SFP module if there is one present. */
-       if (hw->phy.sfp_setup_needed) {
-               status = hw->mac.ops.setup_sfp(hw);
-               hw->phy.sfp_setup_needed = false;
-       }
-
-       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               goto reset_hw_out;
-
-       /* Reset PHY */
-       if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
-               hw->phy.ops.reset(hw);
-
-       /*
-        * Prevent the PCI-E bus from from hanging by disabling PCI-E master
-        * access and verify no pending requests before reset
-        */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
-
-       /*
-        * Issue global reset to the MAC.  This needs to be a SW reset.
-        * If link reset is used, it might reset the MAC when mng is using it
-        */
-       ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-       IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
-       IXGBE_WRITE_FLUSH(hw);
-
-       /* Poll for reset bit to self-clear indicating reset is complete */
-       for (i = 0; i < 10; i++) {
-               udelay(1);
-               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-               if (!(ctrl & IXGBE_CTRL_RST))
-                       break;
-       }
-       if (ctrl & IXGBE_CTRL_RST) {
-               status = IXGBE_ERR_RESET_FAILED;
-               hw_dbg(hw, "Reset polling failed to complete.\n");
-       }
-       /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
-       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-       ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
-       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
-
-       msleep(50);
-
-
-
-       /*
-        * Store the original AUTOC/AUTOC2 values if they have not been
-        * stored off yet.  Otherwise restore the stored original
-        * values since the reset operation sets back to defaults.
-        */
-       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-       if (hw->mac.orig_link_settings_stored == false) {
-               hw->mac.orig_autoc = autoc;
-               hw->mac.orig_autoc2 = autoc2;
-               hw->mac.orig_link_settings_stored = true;
-       } else {
-               if (autoc != hw->mac.orig_autoc)
-                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
-                                       IXGBE_AUTOC_AN_RESTART));
-
-               if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
-                   (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
-                       autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
-                       autoc2 |= (hw->mac.orig_autoc2 &
-                                  IXGBE_AUTOC2_UPPER_MASK);
-                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
-               }
-       }
-
-       /* Store the permanent mac address */
-       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
-       /*
-        * Store MAC address from RAR0, clear receive address registers, and
-        * clear the multicast table.  Also reset num_rar_entries to 128,
-        * since we modify this value when programming the SAN MAC address.
-        */
-       hw->mac.num_rar_entries = 128;
-       hw->mac.ops.init_rx_addrs(hw);
-
-
-
-       /* Store the permanent SAN mac address */
-       hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
-
-       /* Add the SAN MAC address to the RAR only if it's a valid address */
-       if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
-               hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
-               /* Reserve the last RAR for the SAN MAC address */
-               hw->mac.num_rar_entries--;
-       }
-
-reset_hw_out:
-       return status;
-}
-
-/**
- *  ixgbe_insert_mac_addr_82599 - Find a RAR for this mac address
- *  @hw: pointer to hardware structure
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq pool to assign
- *
- *  Puts an ethernet address into a receive address register, or
- *  finds the rar that it is aleady in; adds to the pool list
- **/
-s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-       static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
-       u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
-       u32 rar;
-       u32 rar_low, rar_high;
-       u32 addr_low, addr_high;
-
-       /* swap bytes for HW little endian */
-       addr_low  = addr[0] | (addr[1] << 8)
-                           | (addr[2] << 16)
-                           | (addr[3] << 24);
-       addr_high = addr[4] | (addr[5] << 8);
-
-       /*
-        * Either find the mac_id in rar or find the first empty space.
-        * rar_highwater points to just after the highest currently used
-        * rar in order to shorten the search.  It grows when we add a new
-        * rar to the top.
-        */
-       for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-
-               if (((IXGBE_RAH_AV & rar_high) == 0)
-                   && first_empty_rar == NO_EMPTY_RAR_FOUND) {
-                       first_empty_rar = rar;
-               } else if ((rar_high & 0xFFFF) == addr_high) {
-                       rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
-                       if (rar_low == addr_low)
-                               break;    /* found it already in the rars */
-               }
-       }
-
-       if (rar < hw->mac.rar_highwater) {
-               /* already there so just add to the pool bits */
-               ixgbe_set_vmdq(hw, rar, vmdq);
-       } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
-               /* stick it into first empty RAR slot we found */
-               rar = first_empty_rar;
-               ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-       } else if (rar == hw->mac.rar_highwater) {
-               /* add it to the top of the list and inc the highwater mark */
-               ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-               hw->mac.rar_highwater++;
-       } else if (rar >= hw->mac.num_rar_entries) {
-               return IXGBE_ERR_INVALID_MAC_ADDR;
-       }
-
-       /*
-        * If we found rar[0], make sure the default pool bit (we use pool 0)
-        * remains cleared to be sure default pool packets will get delivered
-        */
-       if (rar == 0)
-               ixgbe_clear_vmdq(hw, rar, 0);
-
-       return rar;
-}
-
-/**
- *  ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to disassociate
- *  @vmdq: VMDq pool index to remove from the rar
- **/
-s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-       u32 mpsar_lo, mpsar_hi;
-       u32 rar_entries = hw->mac.num_rar_entries;
-
-       if (rar < rar_entries) {
-               mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-               mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
-               if (!mpsar_lo && !mpsar_hi)
-                       goto done;
-
-               if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-                       if (mpsar_lo) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-                               mpsar_lo = 0;
-                       }
-                       if (mpsar_hi) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-                               mpsar_hi = 0;
-                       }
-               } else if (vmdq < 32) {
-                       mpsar_lo &= ~(1 << vmdq);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-               } else {
-                       mpsar_hi &= ~(1 << (vmdq - 32));
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-               }
-
-               /* was that the last pool using this rar? */
-               if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-                       hw->mac.ops.clear_rar(hw, rar);
-       } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
-       }
-
-done:
-       return 0;
-}
-
-/**
- *  ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to associate with a VMDq index
- *  @vmdq: VMDq pool index
- **/
-s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-       u32 mpsar;
-       u32 rar_entries = hw->mac.num_rar_entries;
-
-       if (rar < rar_entries) {
-               if (vmdq < 32) {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-                       mpsar |= 1 << vmdq;
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-               } else {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-                       mpsar |= 1 << (vmdq - 32);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-               }
-       } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
-       }
-       return 0;
-}
-
-/**
- *  ixgbe_set_vfta_82599 - Set VLAN filter table
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
- *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
- *
- *  Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                           bool vlan_on)
-{
-       u32 regindex;
-       u32 bitindex;
-       u32 bits;
-       u32 first_empty_slot;
-
-       if (vlan > 4095)
-               return IXGBE_ERR_PARAM;
-
-       /*
-        * this is a 2 part operation - first the VFTA, then the
-        * VLVF and VLVFB if vind is set
-        */
-
-       /* Part 1
-        * The VFTA is a bitstring made up of 128 32-bit registers
-        * that enable the particular VLAN id, much like the MTA:
-        *    bits[11-5]: which register
-        *    bits[4-0]:  which bit in the register
-        */
-       regindex = (vlan >> 5) & 0x7F;
-       bitindex = vlan & 0x1F;
-       bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-       if (vlan_on)
-               bits |= (1 << bitindex);
-       else
-               bits &= ~(1 << bitindex);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
-
-       /* Part 2
-        * If the vind is set
-        *   Either vlan_on
-        *     make sure the vlan is in VLVF
-        *     set the vind bit in the matching VLVFB
-        *   Or !vlan_on
-        *     clear the pool bit and possibly the vind
-        */
-       if (vind) {
-               /* find the vlanid or the first empty slot */
-               first_empty_slot = 0;
-
-               for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
-                       bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
-                       if (!bits && !first_empty_slot)
-                               first_empty_slot = regindex;
-                       else if ((bits & 0x0FFF) == vlan)
-                               break;
-               }
-
-               if (regindex >= IXGBE_VLVF_ENTRIES) {
-                       if (first_empty_slot)
-                               regindex = first_empty_slot;
-                       else {
-                               hw_dbg(hw, "No space in VLVF.\n");
-                               goto out;
-                       }
-               }
-
-
-               if (vlan_on) {
-                       /* set the pool bit */
-                       if (vind < 32) {
-                               bits =
-                                  IXGBE_READ_REG(hw, IXGBE_VLVFB(regindex*2));
-                               bits |= (1 << vind);
-                               IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB(regindex*2), bits);
-                       } else {
-                               bits = IXGBE_READ_REG(hw,
-                                                 IXGBE_VLVFB((regindex*2)+1));
-                               bits |= (1 << vind);
-                               IXGBE_WRITE_REG(hw,
-                                           IXGBE_VLVFB((regindex*2)+1), bits);
-                       }
-               } else {
-                       /* clear the pool bit */
-                       if (vind < 32) {
-                               bits = IXGBE_READ_REG(hw,
-                                    IXGBE_VLVFB(regindex*2));
-                       bits &= ~(1 << vind);
-                               IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB(regindex*2), bits);
-                               bits |= IXGBE_READ_REG(hw,
-                                                 IXGBE_VLVFB((regindex*2)+1));
-                       } else {
-                               bits = IXGBE_READ_REG(hw,
-                                                 IXGBE_VLVFB((regindex*2)+1));
-                               bits &= ~(1 << vind);
-                               IXGBE_WRITE_REG(hw,
-                                           IXGBE_VLVFB((regindex*2)+1), bits);
-                               bits |= IXGBE_READ_REG(hw,
-                                                     IXGBE_VLVFB(regindex*2));
-                       }
-               }
-
-               if (bits)
-                       IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
-                                                    (IXGBE_VLVF_VIEN | vlan));
-               else
-                       IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
-       }
-
-out:
-       return 0;
-}
-
-/**
- *  ixgbe_clear_vfta_82599 - Clear VLAN filter table
- *  @hw: pointer to hardware structure
- *
- *  Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
-{
-       u32 offset;
-
-       for (offset = 0; offset < hw->mac.vft_size; offset++)
-               IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
-       for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
-               IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
-       }
-
-       return 0;
-}
-
-/**
- *  ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
-{
-       int i;
-       hw_dbg(hw, " Clearing UTA\n");
-
-       for (i = 0; i < 128; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
-       return 0;
-}
-
-/**
- *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
-{
-       int i;
-       u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
-       fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
-
-       /*
-        * Before starting reinitialization process,
-        * FDIRCMD.CMD must be zero.
-        */
-       for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
-               if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-                     IXGBE_FDIRCMD_CMD_MASK))
-                       break;
-               udelay(10);
-       }
-       if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-               hw_dbg(hw, "Flow Director previous command isn't complete, "
-                        "aborting table re-initialization. \n");
-               return IXGBE_ERR_FDIR_REINIT_FAILED;
-       }
-
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
-       IXGBE_WRITE_FLUSH(hw);
-       /*
-        * 82599 adapters flow director init flow cannot be restarted,
-        * Workaround 82599 silicon errata by performing the following steps
-        * before re-writing the FDIRCTRL control register with the same value.
-        * - write 1 to bit 8 of FDIRCMD register &
-        * - write 0 to bit 8 of FDIRCMD register
-        */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
-                        IXGBE_FDIRCMD_CLEARHT));
-       IXGBE_WRITE_FLUSH(hw);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-                        ~IXGBE_FDIRCMD_CLEARHT));
-       IXGBE_WRITE_FLUSH(hw);
-       /*
-        * Clear FDIR Hash register to clear any leftover hashes
-        * waiting to be programmed.
-        */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
-       IXGBE_WRITE_FLUSH(hw);
-
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-       IXGBE_WRITE_FLUSH(hw);
-
-       /* Poll init-done after we write FDIRCTRL register */
-       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
-                       break;
-               udelay(10);
-       }
-       if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
-               hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
-               return IXGBE_ERR_FDIR_REINIT_FAILED;
-       }
-
-       /* Clear FDIR statistics registers (read to clear) */
-       IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
-       IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
-       IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-       IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-       IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
-
-       return 0;
-}
-
-/**
- *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
- *  @hw: pointer to hardware structure
- *  @pballoc: which mode to allocate filters with
- **/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
-{
-       u32 fdirctrl = 0;
-       u32 pbsize;
-       int i;
-
-       /*
-        * Before enabling Flow Director, the Rx Packet Buffer size
-        * must be reduced.  The new value is the current size minus
-        * flow director memory usage size.
-        */
-       pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
-       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
-           (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
-       /*
-        * The defaults in the HW for RX PB 1-7 are not zero and so should be
-        * intialized to zero for non DCB mode otherwise actual total RX PB
-        * would be bigger than programmed and filter space would run into
-        * the PB 0 region.
-        */
-       for (i = 1; i < 8; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
-       /* Send interrupt when 64 filters are left */
-       fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
-       /* Set the maximum length per hash bucket to 0xA filters */
-       fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
-
-       switch (pballoc) {
-       case IXGBE_FDIR_PBALLOC_64K:
-               /* 8k - 1 signature filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
-               break;
-       case IXGBE_FDIR_PBALLOC_128K:
-               /* 16k - 1 signature filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
-               break;
-       case IXGBE_FDIR_PBALLOC_256K:
-               /* 32k - 1 signature filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
-               break;
-       default:
-               /* bad value */
-               return IXGBE_ERR_CONFIG;
-       };
-
-       /* Move the flexible bytes to use the ethertype - shift 6 words */
-       fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
-
-       /* Prime the keys for hashing */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-                       IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-                       IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
-
-       /*
-        * Poll init-done after we write the register.  Estimated times:
-        *      10G: PBALLOC = 11b, timing is 60us
-        *       1G: PBALLOC = 11b, timing is 600us
-        *     100M: PBALLOC = 11b, timing is 6ms
-        *
-        *     Multiple these timings by 4 if under full Rx load
-        *
-        * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
-        * 1 msec per poll time.  If we're at line rate and drop to 100M, then
-        * this might not finish in our poll time, but we can live with that
-        * for now.
-        */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-       IXGBE_WRITE_FLUSH(hw);
-       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
-                       break;
-               msleep(1);
-       }
-       if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-               hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
-
-       return 0;
-}
-
-/**
- *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
- *  @hw: pointer to hardware structure
- *  @pballoc: which mode to allocate filters with
- **/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
-{
-       u32 fdirctrl = 0;
-       u32 pbsize;
-       int i;
-
-       /*
-        * Before enabling Flow Director, the Rx Packet Buffer size
-        * must be reduced.  The new value is the current size minus
-        * flow director memory usage size.
-        */
-
-       pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
-       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
-           (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
-       /*
-        * The defaults in the HW for RX PB 1-7 are not zero and so should be
-        * intialized to zero for non DCB mode otherwise actual total RX PB
-        * would be bigger than programmed and filter space would run into
-        * the PB 0 region.
-        */
-       for (i = 1; i < 8; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
-       /* Send interrupt when 64 filters are left */
-       fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
-       switch (pballoc) {
-       case IXGBE_FDIR_PBALLOC_64K:
-               /* 2k - 1 perfect filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
-               break;
-       case IXGBE_FDIR_PBALLOC_128K:
-               /* 4k - 1 perfect filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
-               break;
-       case IXGBE_FDIR_PBALLOC_256K:
-               /* 8k - 1 perfect filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
-               break;
-       default:
-               /* bad value */
-               return IXGBE_ERR_CONFIG;
-       };
-
-       /* Turn perfect match filtering on */
-       fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
-       fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
-       /* Move the flexible bytes to use the ethertype - shift 6 words */
-       fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
-       /* Prime the keys for hashing */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-                       IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-                       IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
-
-       /*
-        * Poll init-done after we write the register.  Estimated times:
-        *      10G: PBALLOC = 11b, timing is 60us
-        *       1G: PBALLOC = 11b, timing is 600us
-        *     100M: PBALLOC = 11b, timing is 6ms
-        *
-        *     Multiple these timings by 4 if under full Rx load
-        *
-        * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
-        * 1 msec per poll time.  If we're at line rate and drop to 100M, then
-        * this might not finish in our poll time, but we can live with that
-        * for now.
-        */
-
-       /* Set the maximum length per hash bucket to 0xA filters */
-       fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
-
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-       IXGBE_WRITE_FLUSH(hw);
-       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
-                       break;
-               msleep(1);
-       }
-       if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-               hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
-
-       return 0;
-}
-
-
-/**
- *  ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
- *  @stream: input bitstream to compute the hash on
- *  @key: 32-bit hash key
- **/
-u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
-{
-       /*
-        * The algorithm is as follows:
-        *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
-        *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
-        *    and A[n] x B[n] is bitwise AND between same length strings
-        *
-        *    K[n] is 16 bits, defined as:
-        *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
-        *       for n modulo 32 < 15, K[n] =
-        *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
-        *
-        *    S[n] is 16 bits, defined as:
-        *       for n >= 15, S[n] = S[n:n - 15]
-        *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
-        *
-        *    To simplify for programming, the algorithm is implemented
-        *    in software this way:
-        *
-        *    Key[31:0], Stream[335:0]
-        *
-        *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
-        *    int_key[350:0] = tmp_key[351:1]
-        *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
-        *
-        *    hash[15:0] = 0;
-        *    for (i = 0; i < 351; i++) {
-        *        if (int_key[i])
-        *            hash ^= int_stream[(i + 15):i];
-        *    }
-        */
-
-       union {
-               u64    fill[6];
-               u32    key[11];
-               u8     key_stream[44];
-       } tmp_key;
-
-       u8   *stream = (u8 *)atr_input;
-       u8   int_key[44];      /* upper-most bit unused */
-       u8   hash_str[46];     /* upper-most 2 bits unused */
-       u16  hash_result = 0;
-       int  i, j, k, h;
-
-       /*
-        * Initialize the fill member to prevent warnings
-        * on some compilers
-        */
-        tmp_key.fill[0] = 0;
-
-       /* First load the temporary key stream */
-       for (i = 0; i < 6; i++) {
-               u64 fillkey = ((u64)key << 32) | key;
-               tmp_key.fill[i] = fillkey;
-       }
-
-       /*
-        * Set the interim key for the hashing.  Bit 352 is unused, so we must
-        * shift and compensate when building the key.
-        */
-
-       int_key[0] = tmp_key.key_stream[0] >> 1;
-       for (i = 1, j = 0; i < 44; i++) {
-               unsigned int this_key = tmp_key.key_stream[j] << 7;
-               j++;
-               int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
-       }
-
-       /*
-        * Set the interim bit string for the hashing.  Bits 368 and 367 are
-        * unused, so shift and compensate when building the string.
-        */
-       hash_str[0] = (stream[40] & 0x7f) >> 1;
-       for (i = 1, j = 40; i < 46; i++) {
-               unsigned int this_str = stream[j] << 7;
-               j++;
-               if (j > 41)
-                       j = 0;
-               hash_str[i] = (u8)(this_str | (stream[j] >> 1));
-       }
-
-       /*
-        * Now compute the hash.  i is the index into hash_str, j is into our
-        * key stream, k is counting the number of bits, and h interates within
-        * each byte.
-        */
-       for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
-               for (h = 0; h < 8 && k < 351; h++, k++) {
-                       if (int_key[j] & (1 << h)) {
-                               /*
-                                * Key bit is set, XOR in the current 16-bit
-                                * string.  Example of processing:
-                                *    h = 0,
-                                *      tmp = (hash_str[i - 2] & 0 << 16) |
-                                *            (hash_str[i - 1] & 0xff << 8) |
-                                *            (hash_str[i] & 0xff >> 0)
-                                *      So tmp = hash_str[15 + k:k], since the
-                                *      i + 2 clause rolls off the 16-bit value
-                                *    h = 7,
-                                *      tmp = (hash_str[i - 2] & 0x7f << 9) |
-                                *            (hash_str[i - 1] & 0xff << 1) |
-                                *            (hash_str[i] & 0x80 >> 7)
-                                */
-                               int tmp = (hash_str[i] >> h);
-                               tmp |= (hash_str[i - 1] << (8 - h));
-                               tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
-                                            << (16 - h);
-                               hash_result ^= (u16)tmp;
-                       }
-               }
-       }
-
-       return hash_result;
-}
-
-/**
- *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- *  @input: input stream to modify
- *  @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
-       input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
-       input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- *  @input: input stream to modify
- *  @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
-       input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
-       input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
-                                                      (src_addr >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
-                                                       (src_addr >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- *  @input: input stream to modify
- *  @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
-       input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
-       input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
-                                                      (dst_addr >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
-                                                       (dst_addr >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
- *  @input: input stream to modify
- *  @src_addr_1: the first 4 bytes of the IP address to load
- *  @src_addr_2: the second 4 bytes of the IP address to load
- *  @src_addr_3: the third 4 bytes of the IP address to load
- *  @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                 u32 src_addr_1, u32 src_addr_2,
-                                 u32 src_addr_3, u32 src_addr_4)
-{
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
-                                                      (src_addr_4 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
-                                                     (src_addr_4 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
-
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
-                                                      (src_addr_3 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
-                                                     (src_addr_3 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
-
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
-                                                      (src_addr_2 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
-                                                     (src_addr_2 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
-
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
-                                                      (src_addr_1 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
-                                                     (src_addr_1 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
- *  @input: input stream to modify
- *  @dst_addr_1: the first 4 bytes of the IP address to load
- *  @dst_addr_2: the second 4 bytes of the IP address to load
- *  @dst_addr_3: the third 4 bytes of the IP address to load
- *  @dst_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
-                                 u32 dst_addr_1, u32 dst_addr_2,
-                                 u32 dst_addr_3, u32 dst_addr_4)
-{
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
-                                                      (dst_addr_4 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
-                                                     (dst_addr_4 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
-
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
-                                                      (dst_addr_3 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
-                                                     (dst_addr_3 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
-
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
-                                                      (dst_addr_2 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
-                                                     (dst_addr_2 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
-
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
-                                                      (dst_addr_1 >> 8) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
-                                                     (dst_addr_1 >> 16) & 0xff;
-       input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_src_port_82599 - Sets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
-       input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
-       input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
-       input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
-       input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
-       input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
-       input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
- *  @input: input stream to modify
- *  @vm_pool: the Virtual Machine pool to load
- **/
-s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
-{
-       input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
-       input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- *  @input: input stream to search
- *  @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
-       *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
-       *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- *  @input: input stream to search
- *  @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
-{
-       *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
-       *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
-       *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
-       *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- *  @input: input stream to search
- *  @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
-{
-       *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
-       *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
-       *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
-       *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- *  @input: input stream to search
- *  @src_addr_1: the first 4 bytes of the IP address to load
- *  @src_addr_2: the second 4 bytes of the IP address to load
- *  @src_addr_3: the third 4 bytes of the IP address to load
- *  @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                 u32 *src_addr_1, u32 *src_addr_2,
-                                 u32 *src_addr_3, u32 *src_addr_4)
-{
-       *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
-       *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
-       *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
-       *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
-       *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
-       *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
-       *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
-       *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
-       *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
-       *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
-       *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
-       *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
-       *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
-       *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
-       *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
-       *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
- *  @input: input stream to search
- *  @dst_addr_1: the first 4 bytes of the IP address to load
- *  @dst_addr_2: the second 4 bytes of the IP address to load
- *  @dst_addr_3: the third 4 bytes of the IP address to load
- *  @dst_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
-                                 u32 *dst_addr_1, u32 *dst_addr_2,
-                                 u32 *dst_addr_3, u32 *dst_addr_4)
-{
-       *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
-       *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
-       *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
-       *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
-
-       *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
-       *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
-       *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
-       *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
-
-       *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
-       *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
-       *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
-       *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
-
-       *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
-       *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
-       *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
-       *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_port_82599 - Gets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
- *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
- **/
-s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
-{
-       *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
-       *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
- **/
-s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
-{
-       *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
-       *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
-{
-       *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
-       *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
- *  @input: input stream to modify
- *  @vm_pool: the Virtual Machine pool to load
- **/
-s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
-{
-       *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
-{
-       *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
-
-       return 0;
-}
-
-/**
- *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
- *  @hw: pointer to hardware structure
- *  @stream: input bitstream
- *  @queue: queue index to direct traffic to
- **/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_atr_input *input,
-                                          u8 queue)
-{
-       u64  fdirhashcmd;
-       u64  fdircmd;
-       u32  fdirhash;
-       u16  bucket_hash, sig_hash;
-       u8   l4type;
-
-       bucket_hash = ixgbe_atr_compute_hash_82599(input,
-                                                  IXGBE_ATR_BUCKET_HASH_KEY);
-
-       /* bucket_hash is only 15 bits */
-       bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-       sig_hash = ixgbe_atr_compute_hash_82599(input,
-                                               IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-       /* Get the l4type in order to program FDIRCMD properly */
-       /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
-       ixgbe_atr_get_l4type_82599(input, &l4type);
-
-       /*
-        * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
-        * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
-        */
-       fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-       fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-                  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
-       switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-       case IXGBE_ATR_L4TYPE_TCP:
-               fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-               break;
-       case IXGBE_ATR_L4TYPE_UDP:
-               fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-               break;
-       case IXGBE_ATR_L4TYPE_SCTP:
-               fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
-               break;
-       default:
-               hw_dbg(hw, " Error on l4type input\n");
-               return IXGBE_ERR_CONFIG;
-       }
-
-       if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
-               fdircmd |= IXGBE_FDIRCMD_IPV6;
-
-       fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
-       fdirhashcmd = ((fdircmd << 32) | fdirhash);
-
-       hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
-       IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
-
-       return 0;
-}
-
-/**
- *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- *  @hw: pointer to hardware structure
- *  @input: input bitstream
- *  @queue: queue index to direct traffic to
- *
- *  Note that the caller to this function must lock before calling, since the
- *  hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                        struct ixgbe_atr_input *input,
-                                        u16 soft_id,
-                                        u8 queue)
-{
-       u32 fdircmd = 0;
-       u32 fdirhash;
-       u32 src_ipv4, dst_ipv4;
-       u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
-       u16 src_port, dst_port, vlan_id, flex_bytes;
-       u16 bucket_hash;
-       u8  l4type;
-
-       /* Get our input values */
-       ixgbe_atr_get_l4type_82599(input, &l4type);
-
-       /*
-        * Check l4type formatting, and bail out before we touch the hardware
-        * if there's a configuration issue
-        */
-       switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-       case IXGBE_ATR_L4TYPE_TCP:
-               fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-               break;
-       case IXGBE_ATR_L4TYPE_UDP:
-               fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-               break;
-       case IXGBE_ATR_L4TYPE_SCTP:
-               fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
-               break;
-       default:
-               hw_dbg(hw, " Error on l4type input\n");
-               return IXGBE_ERR_CONFIG;
-       }
-
-       bucket_hash = ixgbe_atr_compute_hash_82599(input,
-                                                  IXGBE_ATR_BUCKET_HASH_KEY);
-
-       /* bucket_hash is only 15 bits */
-       bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-       ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
-       ixgbe_atr_get_src_port_82599(input, &src_port);
-       ixgbe_atr_get_dst_port_82599(input, &dst_port);
-       ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
-       fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-       /* Now figure out if we're IPv4 or IPv6 */
-       if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-               /* IPv6 */
-               ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
-                                            &src_ipv6_3, &src_ipv6_4);
-
-               IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
-               IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
-               IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
-               /* The last 4 bytes is the same register as IPv4 */
-               IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
-               fdircmd |= IXGBE_FDIRCMD_IPV6;
-               fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
-       } else {
-               /* IPv4 */
-               ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
-               IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
-       }
-
-       ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
-                                   (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
-                              (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
-       fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
-       fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
-       fdircmd |= IXGBE_FDIRCMD_LAST;
-       fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
-       fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
-
-       return 0;
-}
-
-/**
- *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to read
- *  @val: read value
- *
- *  Performs read operation to Omer analog register specified.
- **/
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
-       u32  core_ctl;
-
-       IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
-                       (reg << 8));
-       IXGBE_WRITE_FLUSH(hw);
-       udelay(10);
-       core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
-       *val = (u8)core_ctl;
-
-       return 0;
-}
-
-/**
- *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
- *  @hw: pointer to hardware structure
- *  @reg: atlas register to write
- *  @val: value to write
- *
- *  Performs write operation to Omer analog register specified.
- **/
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
-       u32  core_ctl;
-
-       core_ctl = (reg << 8) | val;
-       IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
-       IXGBE_WRITE_FLUSH(hw);
-       udelay(10);
-
-       return 0;
-}
-
-/**
- *  ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware using the generic start_hw function.
- *  Then performs revision-specific operations:
- *  Clears the rate limiter registers.
- **/
-s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
-{
-       u32 q_num;
-       s32 ret_val = 0;
-
-       ret_val = ixgbe_start_hw_generic(hw);
-
-       /* Clear the rate limiters */
-       for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
-               IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num);
-               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
-       }
-       IXGBE_WRITE_FLUSH(hw);
-
-       /* We need to run link autotry after the driver loads */
-       hw->mac.autotry_restart = true;
-
-       if (ret_val == 0)
-               ret_val = ixgbe_verify_fw_version_82599(hw);
-       return ret_val;
-}
-
-/**
- *  ixgbe_identify_phy_82599 - Get physical layer module
- *  @hw: pointer to hardware structure
- *
- *  Determines the physical layer module found on the current adapter.
- *  If PHY already detected, maintains current PHY type in hw struct,
- *  otherwise executes the PHY detection routine.
- **/
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
-{
-       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
-
-       /* Detect PHY if not unknown - returns success if already detected. */
-       status = ixgbe_identify_phy_generic(hw);
-       if (status != 0)
-               status = ixgbe_identify_sfp_module_generic(hw);
-       /* Set PHY type none if no PHY detected */
-       if (hw->phy.type == ixgbe_phy_unknown) {
-               hw->phy.type = ixgbe_phy_none;
-               status = 0;
-       }
-
-       /* Return error if SFP module has been detected but is not supported */
-       if (hw->phy.type == ixgbe_phy_sfp_unsupported)
-               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-
-       return status;
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
-{
-       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-       u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-       u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-       u16 ext_ability = 0;
-       u8 comp_codes_10g = 0;
-
-       hw->phy.ops.identify(hw);
-
-       if (hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq ||
-           hw->phy.type == ixgbe_phy_cu_unknown) {
-               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
-               IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
-               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-               goto out;
-       }
-
-       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-       case IXGBE_AUTOC_LMS_1G_AN:
-       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
-                           IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-                       goto out;
-               } else
-                       /* SFI mode so read SFP module */
-                       goto sfp_check;
-               break;
-       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-               if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-               else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
-               goto out;
-               break;
-       case IXGBE_AUTOC_LMS_10G_SERIAL:
-               if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-                       goto out;
-               } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
-                       goto sfp_check;
-               break;
-       case IXGBE_AUTOC_LMS_KX4_KX_KR:
-       case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               if (autoc & IXGBE_AUTOC_KR_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-               goto out;
-               break;
-       default:
-               goto out;
-               break;
-       }
-
-sfp_check:
-       /* SFP check must be done last since DA modules are sometimes used to
-        * test KR mode -  we need to id KR mode correctly before SFP module.
-        * Call identify_sfp because the pluggable module may have changed */
-       hw->phy.ops.identify_sfp(hw);
-       if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-               goto out;
-
-       switch (hw->phy.type) {
-       case ixgbe_phy_tw_tyco:
-       case ixgbe_phy_tw_unknown:
-               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-               break;
-       case ixgbe_phy_sfp_avago:
-       case ixgbe_phy_sfp_ftl:
-       case ixgbe_phy_sfp_intel:
-       case ixgbe_phy_sfp_unknown:
-               hw->phy.ops.read_i2c_eeprom(hw,
-                     IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
-               if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-               break;
-       default:
-               break;
-       }
-
-out:
-       return physical_layer;
-}
-
-/**
- *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
- *  @hw: pointer to hardware structure
- *  @regval: register value to write to RXCTRL
- *
- *  Enables the Rx DMA unit for 82599
- **/
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
-{
-#define IXGBE_MAX_SECRX_POLL 30
-       int i;
-       int secrxreg;
-
-       /*
-        * Workaround for 82599 silicon errata when enabling the Rx datapath.
-        * If traffic is incoming before we enable the Rx unit, it could hang
-        * the Rx DMA unit.  Therefore, make sure the security engine is
-        * completely disabled prior to enabling the Rx unit.
-        */
-       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-       secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-       for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
-               secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
-               if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
-                       break;
-               else
-                       /* Use interrupt-safe sleep just in case */
-                       udelay(10);
-       }
-
-       /* For informational purposes only */
-       if (i >= IXGBE_MAX_SECRX_POLL)
-               hw_dbg(hw, "Rx unit being enabled before security "
-                        "path fully disabled.  Continuing with init.\n");
-
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-       secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-       IXGBE_WRITE_FLUSH(hw);
-
-       return 0;
-}
-
-/**
- *  ixgbe_get_device_caps_82599 - Get additional device capabilities
- *  @hw: pointer to hardware structure
- *  @device_caps: the EEPROM word with the extra device capabilities
- *
- *  This function will read the EEPROM location for the device capabilities,
- *  and return the word through device_caps.
- **/
-s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
-{
-       hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
-       return 0;
-}
-
-/**
- *  ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
- *  @hw: pointer to hardware structure
- *  @san_mac_offset: SAN MAC address offset
- *
- *  This function will read the EEPROM location for the SAN MAC address
- *  pointer, and returns the value at that location.  This is used in both
- *  get and set mac_addr routines.
- **/
-s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
-                                        u16 *san_mac_offset)
-{
-       /*
-        * First read the EEPROM pointer to see if the MAC addresses are
-        * available.
-        */
-       hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
-
-       return 0;
-}
-
-/**
- *  ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
- *  per-port, so set_lan_id() must be called before reading the addresses.
- *  set_lan_id() is called by identify_sfp(), but this cannot be relied
- *  upon for non-SFP connections, so we must call it here.
- **/
-s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-       u16 san_mac_data, san_mac_offset;
-       u8 i;
-
-       /*
-        * First read the EEPROM pointer to see if the MAC addresses are
-        * available.  If they're not, no point in calling set_lan_id() here.
-        */
-       ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
-
-       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-               /*
-                * No addresses available in this EEPROM.  It's not an
-                * error though, so just wipe the local address and return.
-                */
-               for (i = 0; i < 6; i++)
-                       san_mac_addr[i] = 0xFF;
-
-               goto san_mac_addr_out;
-       }
-
-       /* make sure we know which port we need to program */
-       hw->mac.ops.set_lan_id(hw);
-       /* apply the port offset to the address offset */
-       (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
-       for (i = 0; i < 3; i++) {
-               hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
-               san_mac_addr[i * 2] = (u8)(san_mac_data);
-               san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
-               san_mac_offset++;
-       }
-
-san_mac_addr_out:
-       return 0;
-}
-
-/**
- *  ixgbe_set_san_mac_addr_82599 - Write the SAN MAC address to the EEPROM
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Write a SAN MAC address to the EEPROM.
- **/
-s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-       s32 status = 0;
-       u16 san_mac_data, san_mac_offset;
-       u8 i;
-
-       /* Look for SAN mac address pointer.  If not defined, return */
-       ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
-
-       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-               status = IXGBE_ERR_NO_SAN_ADDR_PTR;
-               goto san_mac_addr_out;
-       }
-
-       /* Make sure we know which port we need to write */
-       hw->mac.ops.set_lan_id(hw);
-       /* Apply the port offset to the address offset */
-       (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
-
-       for (i = 0; i < 3; i++) {
-               san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
-               san_mac_data |= (u16)(san_mac_addr[i * 2]);
-               hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
-               san_mac_offset++;
-       }
-
-san_mac_addr_out:
-       return status;
-}
-
-/**
- *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
- *  @hw: pointer to hardware structure
- *
- *  Verifies that installed the firmware version is 0.6 or higher
- *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
- *
- *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
- *  if the FW version is not supported.
- **/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
-{
-       s32 status = IXGBE_ERR_EEPROM_VERSION;
-       u16 fw_offset, fw_ptp_cfg_offset;
-       u16 fw_version = 0;
-
-       /* firmware check is only necessary for SFI devices */
-       if (hw->phy.media_type != ixgbe_media_type_fiber) {
-               status = 0;
-               goto fw_version_out;
-       }
-
-       /* get the offset to the Firmware Module block */
-       hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
-
-       if ((fw_offset == 0) || (fw_offset == 0xFFFF))
-               goto fw_version_out;
-
-       /* get the offset to the Pass Through Patch Configuration block */
-       hw->eeprom.ops.read(hw, (fw_offset +
-                                IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
-                                &fw_ptp_cfg_offset);
-
-       if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
-               goto fw_version_out;
-
-       /* get the firmware version */
-       hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
-                                IXGBE_FW_PATCH_VERSION_4),
-                                &fw_version);
-
-       if (fw_version > 0x5)
-               status = 0;
-
-fw_version_out:
-       return status;
-}
diff --git a/drivers/net/ixgbe/ixgbe_api.c b/drivers/net/ixgbe/ixgbe_api.c
index 89bfb76..3967594 100644
--- a/drivers/net/ixgbe/ixgbe_api.c
+++ b/drivers/net/ixgbe/ixgbe_api.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@
 #include "ixgbe_common.h"
 
 extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
 
 /**
  *  ixgbe_init_shared_code - Initialize the shared code
@@ -56,9 +55,6 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
        case ixgbe_mac_82598EB:
                status = ixgbe_init_ops_82598(hw);
                break;
-       case ixgbe_mac_82599EB:
-               status = ixgbe_init_ops_82599(hw);
-               break;
        default:
                status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
                break;
@@ -81,7 +77,6 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
        if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
                switch (hw->device_id) {
                case IXGBE_DEV_ID_82598:
-               case IXGBE_DEV_ID_82598_BX:
                case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
                case IXGBE_DEV_ID_82598AF_DUAL_PORT:
                case IXGBE_DEV_ID_82598AT:
@@ -93,11 +88,6 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
                case IXGBE_DEV_ID_82598EB_SFP_LOM:
                        hw->mac.type = ixgbe_mac_82598EB;
                        break;
-               case IXGBE_DEV_ID_82599_KX4:
-               case IXGBE_DEV_ID_82599_XAUI_LOM:
-               case IXGBE_DEV_ID_82599_SFP:
-                       hw->mac.type = ixgbe_mac_82599EB;
-                       break;
                default:
                        ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
                        break;
@@ -194,46 +184,6 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
 }
 
 /**
- *  ixgbe_get_san_mac_addr - Get SAN MAC address
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
- *  per-port, so set_lan_id() must be called before reading the addresses.
- **/
-s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-       return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
-                              (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_set_san_mac_addr - Write a SAN MAC address
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Writes A SAN MAC address to the EEPROM.
- **/
-s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-       return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
-                              (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_device_caps - Get additional device capabilities
- *  @hw: pointer to hardware structure
- *  @device_caps: the EEPROM word for device capabilities
- *
- *  Reads the extra device capabilities from the EEPROM
- **/
-s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
-{
-       return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
-                              (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
  *  ixgbe_get_bus_info - Set PCI bus info
  *  @hw: pointer to hardware structure
  *
@@ -360,9 +310,6 @@ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
 s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
                        u16 *phy_data)
 {
-       if (hw->phy.id == 0)
-               ixgbe_identify_phy(hw);
-
        return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
                               device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
 }
@@ -378,9 +325,6 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
 s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
                         u16 phy_data)
 {
-       if (hw->phy.id == 0)
-               ixgbe_identify_phy(hw);
-
        return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
                               device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
 }
@@ -604,22 +548,6 @@ s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbe_insert_mac_addr - Find a RAR for this mac address
- *  @hw: pointer to hardware structure
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq pool to assign
- *
- *  Puts an ethernet address into a receive address register, or
- *  finds the rar that it is aleady in; adds to the pool list
- **/
-s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-       return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
-                              (hw, addr, vmdq),
-                              IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
  *  ixgbe_set_rar - Set Rx address register
  *  @hw: pointer to hardware structure
  *  @index: Receive address register to write
@@ -787,15 +715,15 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
 }
 
 /**
- *  ixgbe_fc_enable - Enable flow control
+ *  ixgbe_setup_fc - Set flow control
  *  @hw: pointer to hardware structure
  *  @packetbuf_num: packet buffer number (0-7)
  *
  *  Configures the flow control settings based on SW configuration.
  **/
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 {
-       return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num),
+       return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw, packetbuf_num),
                               IXGBE_NOT_IMPLEMENTED);
 }
 
@@ -841,53 +769,6 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to read
- *  @data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-                        u8 *data)
-{
-       return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
-                              dev_addr, data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_i2c_byte - Writes 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to write
- *  @data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface
- *  at a specified device address.
- **/
-s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-                         u8 data)
-{
-       return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
-                              dev_addr, data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to write
- *  @eeprom_data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
-                           u8 byte_offset, u8 eeprom_data)
-{
-       return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
-                              (hw, byte_offset, eeprom_data),
-                              IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
  *  ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
  *  @hw: pointer to hardware structure
  *  @byte_offset: EEPROM byte offset to read
@@ -913,45 +794,3 @@ u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
        return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
                               (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
 }
-
-/**
- *  ixgbe_enable_rx_dma - Enables Rx DMA unit, dependant on device specifics
- *  @hw: pointer to hardware structure
- *  @regval: bitfield to write to the Rx DMA register
- *
- *  Enables the Rx DMA unit of the device.
- **/
-s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
-{
-       return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
-                              (hw, regval), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to acquire
- *
- *  Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
- *  function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
-{
-       return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
-                              (hw, mask), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_release_swfw_semaphore - Release SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to release
- *
- *  Releases the SWFW semaphore through SW_FW_SYNC register for the specified
- *  function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
-{
-       if (hw->mac.ops.release_swfw_sync)
-               hw->mac.ops.release_swfw_sync(hw, mask);
-}
-
diff --git a/drivers/net/ixgbe/ixgbe_api.h b/drivers/net/ixgbe/ixgbe_api.h
index 3552f79..ab9df90 100644
--- a/drivers/net/ixgbe/ixgbe_api.h
+++ b/drivers/net/ixgbe/ixgbe_api.h
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -78,7 +78,6 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
 s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
 
-s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                   u32 enable_addr);
 s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
@@ -96,7 +95,7 @@ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
                    u32 vind, bool vlan_on);
 
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
 
 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
 s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
@@ -106,57 +105,5 @@ s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
 s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
 u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_atr_input *input,
-                                          u8 queue);
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                        struct ixgbe_atr_input *input,
-                                        u16 soft_id,
-                                        u8 queue);
-u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan_id);
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr);
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr);
-s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, u32 src_addr_1,
-                                 u32 src_addr_2, u32 src_addr_3,
-                                 u32 src_addr_4);
-s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 dst_addr_1,
-                                 u32 dst_addr_2, u32 dst_addr_3,
-                                 u32 dst_addr_4);
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port);
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port);
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte);
-s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool);
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type);
-s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan_id);
-s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr);
-s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr);
-s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, u32 *src_addr_1,
-                                 u32 *src_addr_2, u32 *src_addr_3,
-                                 u32 *src_addr_4);
-s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 *dst_addr_1,
-                                 u32 *dst_addr_2, u32 *dst_addr_3,
-                                 u32 *dst_addr_4);
-s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port);
-s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port);
-s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
-                                  u16 *flex_byte);
-s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool);
-s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type);
-s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-                        u8 *data);
-s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-                         u8 data);
-s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
-s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
-
 
 #endif /* _IXGBE_API_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e4b3055..8801042 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -42,7 +42,11 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 
+static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
+static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 
 /**
  *  ixgbe_init_ops_generic - Inits function ptrs
@@ -75,24 +79,20 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
        mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
        mac->ops.get_media_type = NULL;
        mac->ops.get_supported_physical_layer = NULL;
-       mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
        mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
        mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
        mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
        mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
-       mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
-       mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
 
        /* LEDs */
        mac->ops.led_on = &ixgbe_led_on_generic;
        mac->ops.led_off = &ixgbe_led_off_generic;
-       mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
-       mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+       mac->ops.blink_led_start = NULL;
+       mac->ops.blink_led_stop = NULL;
 
        /* RAR, Multicast, VLAN */
        mac->ops.set_rar = &ixgbe_set_rar_generic;
        mac->ops.clear_rar = &ixgbe_clear_rar_generic;
-       mac->ops.insert_mac_addr = NULL;
        mac->ops.set_vmdq = NULL;
        mac->ops.clear_vmdq = NULL;
        mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
@@ -104,8 +104,6 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
        mac->ops.set_vfta = NULL;
        mac->ops.init_uta_tables = NULL;
 
-       /* Flow Control */
-       mac->ops.fc_enable = &ixgbe_fc_enable_generic;
 
        /* Link */
        mac->ops.get_link_capabilities = NULL;
@@ -128,16 +126,28 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
        u32 ctrl_ext;
-       s32 ret_val = 0;
 
        /* Set the media type */
        hw->phy.media_type = hw->mac.ops.get_media_type(hw);
 
-       /* PHY ops initialization must be done in reset_hw() */
+       /* Set bus info */
+       hw->mac.ops.get_bus_info(hw);
+
+       /* Identify the PHY */
+       hw->phy.ops.identify(hw);
+
+       /*
+        * Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table
+        */
+       hw->mac.ops.init_rx_addrs(hw);
 
        /* Clear the VLAN filter table */
        hw->mac.ops.clear_vfta(hw);
 
+       /* Set up link */
+       hw->mac.ops.setup_link(hw);
+
        /* Clear statistics registers */
        hw->mac.ops.clear_hw_cntrs(hw);
 
@@ -147,13 +157,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);
 
-       /* Setup flow control */
-       ixgbe_setup_fc(hw, 0);
-
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;
 
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -168,17 +175,13 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
-
        /* Reset the hardware */
-       status = hw->mac.ops.reset_hw(hw);
+       hw->mac.ops.reset_hw(hw);
 
-       if (status == 0) {
-               /* Start the HW */
-               status = hw->mac.ops.start_hw(hw);
-       }
+       /* Start the HW */
+       hw->mac.ops.start_hw(hw);
 
-       return status;
+       return 0;
 }
 
 /**
@@ -204,28 +207,15 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_RLEC);
        IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-       if (hw->mac.type >= ixgbe_mac_82599EB) {
-               IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-               IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-       } else {
-               IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-               IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-       }
+       IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+       IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
 
        for (i = 0; i < 8; i++) {
                IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-               if (hw->mac.type >= ixgbe_mac_82599EB) {
-                       IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-               } else {
-                       IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
-                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
-               }
+               IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
        }
-       if (hw->mac.type >= ixgbe_mac_82599EB)
-               for (i = 0; i < 8; i++)
-                       IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        IXGBE_READ_REG(hw, IXGBE_PRC64);
        IXGBE_READ_REG(hw, IXGBE_PRC127);
        IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -392,7 +382,6 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
 
        reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
        bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
-       bus->lan_id = bus->func;
 
        /* check for a port swap */
        reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
@@ -597,6 +586,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
                ixgbe_shift_out_eeprom_bits(hw, data, 16);
                ixgbe_standby_eeprom(hw);
 
+               msleep(hw->eeprom.semaphore_delay);
                /* Done with writing - release the EEPROM */
                ixgbe_release_eeprom(hw);
        }
@@ -785,10 +775,13 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_EEPROM;
-       u32 timeout = 2000;
+       u32 timeout;
        u32 i;
        u32 swsm;
 
+       /* Set timeout value based on size of EEPROM */
+       timeout = hw->eeprom.word_size + 1;
+
        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
@@ -800,7 +793,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                        status = 0;
                        break;
                }
-               udelay(50);
+               msleep(1);
        }
 
        /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -828,14 +821,11 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * was not granted because we don't have access to the EEPROM
                 */
                if (i >= timeout) {
-                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
+                       hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
                                 "not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
-       } else {
-               hw_dbg(hw, "Software semaphore SMBI between device drivers "
-                        "not granted.\n");
        }
 
        return status;
@@ -1068,9 +1058,6 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
 
        ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
-       /* Delay before attempt to obtain semaphore again to allow FW access */
-       msleep(hw->eeprom.semaphore_delay);
 }
 
 /**
@@ -1300,6 +1287,38 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
 }
 
 /**
+ *  ixgbe_enable_rar - Enable Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: index into the RAR table
+ *
+ *  Enables the select receive address register.
+ **/
+static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
+{
+       u32 rar_high;
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high |= IXGBE_RAH_AV;
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+}
+
+/**
+ *  ixgbe_disable_rar - Disable Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: index into the RAR table
+ *
+ *  Disables the select receive address register.
+ **/
+static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
+{
+       u32 rar_high;
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= (~IXGBE_RAH_AV);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+}
+
+/**
  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
  *  @hw: pointer to hardware structure
  *
@@ -1350,6 +1369,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        }
 
        /* Clear the MTA */
+       hw->addr_ctrl.mc_addr_in_rar_count = 0;
        hw->addr_ctrl.mta_in_use = 0;
        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1382,7 +1402,8 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
         * else put the controller into promiscuous mode
         */
        if (hw->addr_ctrl.rar_used_count < rar_entries) {
-               rar = hw->addr_ctrl.rar_used_count;
+               rar = hw->addr_ctrl.rar_used_count -
+                     hw->addr_ctrl.mc_addr_in_rar_count;
                hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
                hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
                hw->addr_ctrl.rar_used_count++;
@@ -1421,13 +1442,14 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
         * Clear accounting of old secondary address list,
         * don't count RAR[0]
         */
-       uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+       uc_addr_in_use = hw->addr_ctrl.rar_used_count -
+                        hw->addr_ctrl.mc_addr_in_rar_count - 1;
        hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
        hw->addr_ctrl.overflow_promisc = 0;
 
        /* Zero out the other receive addresses */
-       hw_dbg(hw, "Clearing RAR[1-%d]\n", hw->addr_ctrl.rar_used_count);
-       for (i = 1; i <= hw->addr_ctrl.rar_used_count; i++) {
+       hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
+       for (i = 1; i <= uc_addr_in_use; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
        }
@@ -1536,6 +1558,40 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 }
 
 /**
+ *  ixgbe_add_mc_addr - Adds a multicast address.
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: new multicast address
+ *
+ *  Adds it to unused receive address register or to the multicast table.
+ **/
+void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+       u32 rar_entries = hw->mac.num_rar_entries;
+       u32 rar;
+
+       hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
+                 mc_addr[0], mc_addr[1], mc_addr[2],
+                 mc_addr[3], mc_addr[4], mc_addr[5]);
+
+       /*
+        * Place this multicast address in the RAR if there is room,
+        * else put it in the MTA
+        */
+       if (hw->addr_ctrl.rar_used_count < rar_entries) {
+               /* use RAR from the end up for multicast */
+               rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
+               hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
+               hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
+               hw->addr_ctrl.rar_used_count++;
+               hw->addr_ctrl.mc_addr_in_rar_count++;
+       } else {
+               ixgbe_set_mta(hw, mc_addr);
+       }
+
+       hw_dbg(hw, "ixgbe_add_mc_addr Complete\n");
+}
+
+/**
  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
  *  @mc_addr_list: the list of new multicast addresses
@@ -1551,6 +1607,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                       u32 mc_addr_count, ixgbe_mc_addr_itr next)
 {
        u32 i;
+       u32 rar_entries = hw->mac.num_rar_entries;
        u32 vmdq;
 
        /*
@@ -1558,8 +1615,18 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
         * use.
         */
        hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+       hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
+       hw->addr_ctrl.mc_addr_in_rar_count = 0;
        hw->addr_ctrl.mta_in_use = 0;
 
+       /* Zero out the other receive addresses. */
+       hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
+                 rar_entries - 1);
+       for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+       }
+
        /* Clear the MTA */
        hw_dbg(hw, " Clearing MTA\n");
        for (i = 0; i < hw->mac.mcft_size; i++)
@@ -1568,7 +1635,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
        /* Add the new addresses */
        for (i = 0; i < mc_addr_count; i++) {
                hw_dbg(hw, " Adding the multicast addresses:\n");
-               ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+               ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
        }
 
        /* Enable mta */
@@ -1588,8 +1655,15 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
  **/
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 {
+       u32 i;
+       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
+       if (a->mc_addr_in_rar_count > 0)
+               for (i = (rar_entries - a->mc_addr_in_rar_count);
+                    i < rar_entries; i++)
+                       ixgbe_enable_rar(hw, i);
+
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
                                hw->mac.mc_filter_type);
@@ -1605,369 +1679,23 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 {
+       u32 i;
+       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
+       if (a->mc_addr_in_rar_count > 0)
+               for (i = (rar_entries - a->mc_addr_in_rar_count);
+                    i < rar_entries; i++)
+                       ixgbe_disable_rar(hw, i);
+
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
        return 0;
 }
 
-/**
- *  ixgbe_fc_enable_generic - Enable flow control
- *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
- *
- *  Enable flow control according to the current settings.
- **/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
-{
-       s32 ret_val = 0;
-       u32 mflcn_reg, fccfg_reg;
-       u32 reg;
-       u32 rx_pba_size;
-
-#ifdef CONFIG_DCB
-       if (hw->fc.requested_mode == ixgbe_fc_pfc)
-               goto out;
-
-#endif /* CONFIG_DCB */
-       /* Negotiate the fc mode to use */
-       ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
-               goto out;
-
-       /* Disable any previous flow control settings */
-       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-       mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
-
-       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-       fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
-
-       /*
-        * The possible values of fc.current_mode are:
-        * 0: Flow control is completely disabled
-        * 1: Rx flow control is enabled (we can receive pause frames,
-        *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
-        *    we do not support receiving pause frames).
-        * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-        * 4: Priority Flow Control is enabled.
-#endif
-        * other: Invalid.
-        */
-       switch (hw->fc.current_mode) {
-       case ixgbe_fc_none:
-               /* Flow control is disabled by software override or autoneg.
-                * The code below will actually disable it in the HW.
-                */
-               break;
-       case ixgbe_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is
-                * disabled by software override. Since there really
-                * isn't a way to advertise that we are capable of RX
-                * Pause ONLY, we will advertise that we support both
-                * symmetric and asymmetric Rx PAUSE.  Later, we will
-                * disable the adapter's ability to send PAUSE frames.
-                */
-               mflcn_reg |= IXGBE_MFLCN_RFCE;
-               break;
-       case ixgbe_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled by software override.
-                */
-               fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
-               break;
-       case ixgbe_fc_full:
-               /* Flow control (both Rx and Tx) is enabled by SW override. */
-               mflcn_reg |= IXGBE_MFLCN_RFCE;
-               fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
-               break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
-       default:
-               hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = -IXGBE_ERR_CONFIG;
-               goto out;
-               break;
-       }
-
-       /* Set 802.3x based flow control settings. */
-       mflcn_reg |= IXGBE_MFLCN_DPF;
-       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
-       IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
-
-       reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
-       /* Thresholds are different for link flow control when in DCB mode */
-       if (reg & IXGBE_MTQC_RT_ENA) {
-               rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
-
-               /* Always disable XON for LFC when in DCB mode */
-               reg = (rx_pba_size >> 5) & 0xFFE0;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
-
-               reg = (rx_pba_size >> 2) & 0xFFE0;
-               if (hw->fc.current_mode & ixgbe_fc_tx_pause)
-                       reg |= IXGBE_FCRTH_FCEN;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
-       } else {
-               /* Set up and enable Rx high/low water mark thresholds,
-                * enable XON. */
-               if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-                       if (hw->fc.send_xon) {
-                               IXGBE_WRITE_REG(hw,
-                                             IXGBE_FCRTL_82599(packetbuf_num),
-                                             (hw->fc.low_water |
-                                             IXGBE_FCRTL_XONE));
-                       } else {
-                               IXGBE_WRITE_REG(hw,
-                                             IXGBE_FCRTL_82599(packetbuf_num),
-                                             hw->fc.low_water);
-                       }
-
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
-                                      (hw->fc.high_water | IXGBE_FCRTH_FCEN));
-               }
-       }
-
-       /* Configure pause time (2 TCs per register) */
-       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
-       if ((packetbuf_num & 1) == 0)
-               reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
-       else
-               reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
-       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
-
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
-
-out:
-       return ret_val;
-}
-
-/**
- *  ixgbe_fc_autoneg - Configure flow control
- *  @hw: pointer to hardware structure
- *
- *  Compares our advertised flow control capabilities to those advertised by
- *  our link partner, and determines the proper flow control mode to use.
- **/
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
-{
-       s32 ret_val = 0;
-       ixgbe_link_speed speed;
-       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-       bool link_up;
-
-       /*
-        * AN should have completed when the cable was plugged in.
-        * Look for reasons to bail out.  Bail out if:
-        * - FC autoneg is disabled, or if
-        * - we don't have multispeed fiber, or if
-        * - we're not running at 1G, or if
-        * - link is not up, or if
-        * - link is up but AN did not complete, or if
-        * - link is up and AN completed but timed out
-        *
-        * Since we're being called from an LSC, link is already know to be up.
-        * So use link_up_wait_to_complete=false.
-        */
-       hw->mac.ops.check_link(hw, &speed, &link_up, false);
-       linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-
-       if (hw->fc.disable_fc_autoneg ||
-           !hw->phy.multispeed_fiber ||
-           (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
-           !link_up ||
-           ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-           ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-               hw->fc.fc_was_autonegged = false;
-               hw->fc.current_mode = hw->fc.requested_mode;
-               hw_dbg(hw, "Autoneg FC was skipped.\n");
-               goto out;
-       }
-
-       /*
-        * Read the AN advertisement and LP ability registers and resolve
-        * local flow control settings accordingly
-        */
-       pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-       pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-       if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-               (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
-               /*
-                * Now we need to check if the user selected Rx ONLY
-                * of pause frames.  In this case, we had to advertise
-                * FULL flow control because we could not advertise RX
-                * ONLY. Hence, we must now check to see if we need to
-                * turn OFF the TRANSMISSION of PAUSE frames.
-                */
-               if (hw->fc.requested_mode == ixgbe_fc_full) {
-                       hw->fc.current_mode = ixgbe_fc_full;
-                       hw_dbg(hw, "Flow Control = FULL.\n");
-               } else {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-               }
-       } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                  (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-               hw->fc.current_mode = ixgbe_fc_tx_pause;
-               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-       } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                  !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-               hw->fc.current_mode = ixgbe_fc_rx_pause;
-               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-       } else {
-               hw->fc.current_mode = ixgbe_fc_none;
-               hw_dbg(hw, "Flow Control = NONE.\n");
-       }
-
-       /* Record that current_mode is the result of a successful autoneg */
-       hw->fc.fc_was_autonegged = true;
-
-out:
-       return ret_val;
-}
-
-/**
- *  ixgbe_setup_fc - Set up flow control
- *  @hw: pointer to hardware structure
- *
- *  Called at init time to set up flow control.
- **/
-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
-{
-       s32 ret_val = 0;
-       u32 reg;
-
-#ifdef CONFIG_DCB
-       if (hw->fc.requested_mode == ixgbe_fc_pfc) {
-               hw->fc.current_mode = hw->fc.requested_mode;
-               goto out;
-       }
-
-#endif /* CONFIG_DCB */
-
-       /* Validate the packetbuf configuration */
-       if (packetbuf_num < 0 || packetbuf_num > 7) {
-               hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
-                         " 0-7\n", packetbuf_num);
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
-       }
-
-       /*
-        * Validate the water mark configuration.  Zero water marks are invalid
-        * because it causes the controller to just blast out fc packets.
-        */
-       if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
-               hw_dbg(hw, "Invalid water mark configuration\n");
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
-       }
-
-       /*
-        * Validate the requested mode.  Strict IEEE mode does not allow
-        * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
-        */
-       if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-               hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
-       }
-
-       /*
-        * 10gig parts do not have a word in the EEPROM to determine the
-        * default flow control setting, so we explicitly set it to full.
-        */
-       if (hw->fc.requested_mode == ixgbe_fc_default)
-               hw->fc.requested_mode = ixgbe_fc_full;
-
-       /*
-        * Set up the 1G flow control advertisement registers so the HW will be
-        * able to do fc autoneg once the cable is plugged in.  If we end up
-        * using 10g instead, this is harmless.
-        */
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-
-       /*
-        * The possible values of fc.requested_mode are:
-        * 0: Flow control is completely disabled
-        * 1: Rx flow control is enabled (we can receive pause frames,
-        *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
-        *    we do not support receiving pause frames).
-        * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-        * 4: Priority Flow Control is enabled.
-#endif
-        * other: Invalid.
-        */
-       switch (hw->fc.requested_mode) {
-       case ixgbe_fc_none:
-               /* Flow control completely disabled by software override. */
-               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-               break;
-       case ixgbe_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is
-                * disabled by software override. Since there really
-                * isn't a way to advertise that we are capable of RX
-                * Pause ONLY, we will advertise that we support both
-                * symmetric and asymmetric Rx PAUSE.  Later, we will
-                * disable the adapter's ability to send PAUSE frames.
-                */
-               reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-               break;
-       case ixgbe_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled by software override.
-                */
-               reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
-               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
-               break;
-       case ixgbe_fc_full:
-               /* Flow control (both Rx and Tx) is enabled by SW override. */
-               reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-               break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
-       default:
-               hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = -IXGBE_ERR_CONFIG;
-               goto out;
-               break;
-       }
-
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-       /* Enable and restart autoneg to inform the link partner */
-       reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
 
-       /* Disable AN timeout */
-       if (hw->fc.strict_ieee)
-               reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-       hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-
-out:
-       return ret_val;
-}
 
 /**
  *  ixgbe_disable_pcie_master - Disable PCI-express master access
@@ -2027,10 +1755,6 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        s32 timeout = 200;
 
        while (timeout) {
-               /*
-                * SW EEPROM semaphore bit is used for access to all
-                * SW_FW_SYNC/GSSR bits (not just EEPROM)
-                */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return -IXGBE_ERR_SWFW_SYNC;
 
@@ -2048,7 +1772,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        }
 
        if (!timeout) {
-               hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
+               hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
                return -IXGBE_ERR_SWFW_SYNC;
        }
 
@@ -2081,75 +1805,3 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        ixgbe_release_eeprom_semaphore(hw);
 }
 
-/**
- *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
- *  @hw: pointer to hardware structure
- *  @regval: register value to write to RXCTRL
- *
- *  Enables the Rx DMA unit
- **/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
-{
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-
-       return 0;
-}
-
-/**
- *  ixgbe_blink_led_start_generic - Blink LED based on index.
- *  @hw: pointer to hardware structure
- *  @index: led number to blink
- **/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
-{
-       ixgbe_link_speed speed = 0;
-       bool link_up = 0;
-       u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
-       /*
-        * Link must be up to auto-blink the LEDs;
-        * Force it if link is down.
-        */
-       hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
-       if (!link_up) {
-
-               autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-               autoc_reg |= IXGBE_AUTOC_FLU;
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-               msleep(10);
-       }
-
-       led_reg &= ~IXGBE_LED_MODE_MASK(index);
-       led_reg |= IXGBE_LED_BLINK(index);
-       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-       IXGBE_WRITE_FLUSH(hw);
-
-       return 0;
-}
-
-/**
- *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
- *  @hw: pointer to hardware structure
- *  @index: led number to stop blinking
- **/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
-{
-       u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
-
-       autoc_reg &= ~IXGBE_AUTOC_FLU;
-       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
-       led_reg &= ~IXGBE_LED_MODE_MASK(index);
-       led_reg &= ~IXGBE_LED_BLINK(index);
-       led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
-       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-       IXGBE_WRITE_FLUSH(hw);
-
-       return 0;
-}
-
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 5045656..a6a08f5 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -61,14 +61,11 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                       ixgbe_mc_addr_itr func);
 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
                                       u32 addr_count, ixgbe_mc_addr_itr func);
-void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
 
-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packtetbuf_num);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -77,7 +74,4 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 
 s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
 s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-
 #endif /* IXGBE_COMMON */
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index dd2df31..3a16e94 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@
 #include "ixgbe_type.h"
 #include "ixgbe_dcb.h"
 #include "ixgbe_dcb_82598.h"
-#include "ixgbe_dcb_82599.h"
 
 /**
  * ixgbe_dcb_config - Struct containing DCB settings.
@@ -218,8 +217,6 @@ s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
        return ret;
 }
 
@@ -237,8 +234,6 @@ s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
        return ret;
 }
 
@@ -255,8 +250,6 @@ s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
        return ret;
 }
 
@@ -273,8 +266,6 @@ s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
        return ret;
 }
 
@@ -291,8 +282,6 @@ s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
        return ret;
 }
 
@@ -309,8 +298,6 @@ s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config);
        return ret;
 }
 
@@ -326,8 +313,6 @@ s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_config_tc_stats_82598(hw);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_config_tc_stats_82599(hw);
        return ret;
 }
 
@@ -344,7 +329,5 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
        s32 ret = 0;
        if (hw->mac.type == ixgbe_mac_82598EB)
                ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
-               ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
        return ret;
 }
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 112c641..206c9f2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -74,26 +74,6 @@ enum strict_prio_type {
        prio_link
 };
 
-/* DCB capability definitions */
-#define IXGBE_DCB_PG_SUPPORT        0x00000001
-#define IXGBE_DCB_PFC_SUPPORT       0x00000002
-#define IXGBE_DCB_BCN_SUPPORT       0x00000004
-#define IXGBE_DCB_UP2TC_SUPPORT     0x00000008
-#define IXGBE_DCB_GSP_SUPPORT       0x00000010
-
-#define IXGBE_DCB_8_TC_SUPPORT      0x80
-
-struct dcb_support {
-       /* DCB capabilities */
-       u32 capabilities;
-
-       /* Each bit represents a number of TCs configurable in the hw.
-        * If 8 traffic classes can be configured, the value is 0x80.
-        */
-       u8  traffic_classes;
-       u8  pfc_traffic_classes;
-};
-
 /* Traffic class bandwidth allocation per direction */
 struct tc_bw_alloc {
        u8 bwg_id;                /* Bandwidth Group (BWG) ID */
@@ -127,15 +107,9 @@ enum dcb_rx_pba_cfg {
        pba_80_48      /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
 };
 
-struct dcb_num_tcs {
-       u8 pg_tcs;
-       u8 pfc_tcs;
-};
 
 struct ixgbe_dcb_config {
        struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
-       struct dcb_support support;
-       struct dcb_num_tcs num_tcs;
        u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
        bool pfc_mode_enable;
        bool  round_robin_enable;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index ace0a1f..9f937a0 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -299,13 +299,11 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
        u32 reg, rx_pba_size;
        u8  i;
 
-       if (!dcb_config->pfc_mode_enable)
-               goto out;
-
        /* Enable Transmit Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
        reg &= ~IXGBE_RMCS_TFCE_802_3X;
        /* correct the reporting of our flow control status */
+       hw->fc.current_mode = ixgbe_fc_none;
        reg |= IXGBE_RMCS_TFCE_PRIORITY;
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
 
@@ -349,7 +347,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
 
-out:
        return 0;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index 247192c..592b0f8 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
deleted file mode 100644
index 8dd78b0..0000000
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*******************************************************************************
-
-  Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@xxxxxxxxxxxxxxxxxxxxx>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-#include "ixgbe_type.h"
-#include "ixgbe_dcb.h"
-#include "ixgbe_dcb_82599.h"
-
-/**
- * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count:  Number of elements in bwg_array.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
-                                 struct ixgbe_hw_stats *stats,
-                                 u8 tc_count)
-{
-       int tc;
-
-       if (tc_count > MAX_TRAFFIC_CLASS)
-               return DCB_ERR_PARAM;
-       /* Statistics pertaining to each traffic class */
-       for (tc = 0; tc < tc_count; tc++) {
-               /* Transmitted Packets */
-               stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
-               /* Transmitted Bytes */
-               stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
-               /* Received Packets */
-               stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
-               /* Received Bytes */
-               stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
-
-#if 0
-               /* Can we get rid of these??  Consequently, getting rid
-                * of the tc_stats structure.
-                */
-               tc_stats_array[up]->in_overflow_discards = 0;
-               tc_stats_array[up]->out_overflow_discards = 0;
-#endif
-       }
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count:  Number of elements in bwg_array.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
-                                  struct ixgbe_hw_stats *stats,
-                                  u8 tc_count)
-{
-       int tc;
-
-       if (tc_count > MAX_TRAFFIC_CLASS)
-               return DCB_ERR_PARAM;
-       for (tc = 0; tc < tc_count; tc++) {
-               /* Priority XOFF Transmitted */
-               stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
-               /* Priority XOFF Received */
-               stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
-       }
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers for DCB mode.
- */
-s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_dcb_config *dcb_config)
-{
-       s32 ret_val = 0;
-       u32 value = IXGBE_RXPBSIZE_64KB;
-       u8  i = 0;
-
-       /* Setup Rx packet buffer sizes */
-       switch (dcb_config->rx_pba_cfg) {
-       case pba_80_48:
-               /* Setup the first four at 80KB */
-               value = IXGBE_RXPBSIZE_80KB;
-               for (; i < 4; i++)
-                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
-               /* Setup the last four at 48KB...don't re-init i */
-               value = IXGBE_RXPBSIZE_48KB;
-               /* Fall Through */
-       case pba_equal:
-       default:
-               for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
-                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
-
-               /* Setup Tx packet buffer sizes */
-               for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
-                       IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
-                                       IXGBE_TXPBSIZE_20KB);
-                       IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
-                                       IXGBE_TXPBTHRESH_DCB);
-               }
-               break;
-       }
-
-       return ret_val;
-}
-
-/**
- * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Rx Packet Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
-{
-       struct tc_bw_alloc    *p;
-       u32    reg           = 0;
-       u32    credit_refill = 0;
-       u32    credit_max    = 0;
-       u8     i             = 0;
-
-       /*
-        * Disable the arbiter before changing parameters
-        * (always enable recycle mode; WSP)
-        */
-       reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
-       IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
-       /* Map all traffic classes to their UP, 1 to 1 */
-       reg = 0;
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-               reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
-       IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
-
-       /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
-               credit_refill = p->data_credits_refill;
-               credit_max    = p->data_credits_max;
-               reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
-
-               reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
-
-               if (p->prio_type == prio_link)
-                       reg |= IXGBE_RTRPT4C_LSP;
-
-               IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
-       }
-
-       /*
-        * Configure Rx packet plane (recycle mode; WSP) and
-        * enable arbiter
-        */
-       reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
-       IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Descriptor Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
-{
-       struct tc_bw_alloc *p;
-       u32    reg, max_credits;
-       u8     i;
-
-       /*
-        * Disable the arbiter before changing parameters
-        * (always enable recycle mode; WSP)
-        */
-       reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM | IXGBE_RTTDCS_ARBDIS;
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-       /* Clear the per-Tx queue credits; we use per-TC instead */
-       for (i = 0; i < 128; i++) {
-               IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
-               IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
-       }
-
-       /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               max_credits = dcb_config->tc_config[i].desc_credits_max;
-               reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
-               reg |= p->data_credits_refill;
-               reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
-
-               if (p->prio_type == prio_group)
-                       reg |= IXGBE_RTTDT2C_GSP;
-
-               if (p->prio_type == prio_link)
-                       reg |= IXGBE_RTTDT2C_LSP;
-
-               IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
-       }
-
-       /*
-        * Configure Tx descriptor plane (recycle mode; WSP) and
-        * enable arbiter
-        */
-       reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Packet Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
-{
-       struct tc_bw_alloc *p;
-       u32 reg;
-       u8 i;
-
-       /*
-        * Disable the arbiter before changing parameters
-        * (always enable recycle mode; SP; arb delay)
-        */
-       reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
-             (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
-             IXGBE_RTTPCS_ARBDIS;
-       IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
-
-       /* Map all traffic classes to their UP, 1 to 1 */
-       reg = 0;
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-               reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
-       IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
-
-       /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               reg = p->data_credits_refill;
-               reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
-               reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
-
-               if (p->prio_type == prio_group)
-                       reg |= IXGBE_RTTPT2C_GSP;
-
-               if (p->prio_type == prio_link)
-                       reg |= IXGBE_RTTPT2C_LSP;
-
-               IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
-       }
-
-       /*
-        * Configure Tx packet plane (recycle mode; SP; arb delay) and
-        * enable arbiter
-        */
-       reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
-             (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
-       IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Priority Flow Control (PFC) for each traffic class.
- */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
-{
-       u32 i, reg, rx_pba_size;
-
-       /* If PFC is disabled globally then fall back to LFC. */
-       if (!dcb_config->pfc_mode_enable) {
-               for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-                       hw->mac.ops.fc_enable(hw, i);
-               goto out;
-       }
-
-       /* Configure PFC Tx thresholds per TC */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               if (dcb_config->rx_pba_cfg == pba_equal)
-                       rx_pba_size = IXGBE_RXPBSIZE_64KB;
-               else
-                       rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
-                                             : IXGBE_RXPBSIZE_48KB;
-
-               reg = ((rx_pba_size >> 5) & 0xFFE0);
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
-                       reg |= IXGBE_FCRTL_XONE;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
-
-               reg = ((rx_pba_size >> 2) & 0xFFE0);
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
-                       reg |= IXGBE_FCRTH_FCEN;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
-       }
-
-       /* Configure pause time (2 TCs per register) */
-       reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-       for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
-               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-       /* Configure flow control refresh threshold value */
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-       /* Enable Transmit PFC */
-       reg = IXGBE_FCCFG_TFCE_PRIORITY;
-       IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
-
-       /*
-        * Enable Receive PFC
-        * We will always honor XOFF frames we receive when
-        * we are in PFC mode.
-        */
-       reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-       reg &= ~IXGBE_MFLCN_RFCE;
-       reg |= IXGBE_MFLCN_RPFCE;
-       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-out:
-       return 0;
-}
-
-/**
- * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
- * @hw: pointer to hardware structure
- *
- * Configure queue statistics registers, all queues belonging to same traffic
- * class uses a single set of queue statistics counters.
- */
-s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
-{
-       u32 reg = 0;
-       u8  i   = 0;
-
-       /*
-        * Receive Queues stats setting
-        * 32 RQSMR registers, each configuring 4 queues.
-        * Set all 16 queues of each TC to the same stat
-        * with TC 'n' going to stat 'n'.
-        */
-       for (i = 0; i < 32; i++) {
-               reg = 0x01010101 * (i / 4);
-               IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
-       }
-       /*
-        * Transmit Queues stats setting
-        * 32 TQSM registers, each controlling 4 queues.
-        * Set all queues of each TC to the same stat
-        * with TC 'n' going to stat 'n'.
-        * Tx queues are allocated non-uniformly to TCs:
-        * 32, 32, 16, 16, 8, 8, 8, 8.
-        */
-       for (i = 0; i < 32; i++) {
-               if (i < 8)
-                       reg = 0x00000000;
-               else if (i < 16)
-                       reg = 0x01010101;
-               else if (i < 20)
-                       reg = 0x02020202;
-               else if (i < 24)
-                       reg = 0x03030303;
-               else if (i < 26)
-                       reg = 0x04040404;
-               else if (i < 28)
-                       reg = 0x05050505;
-               else if (i < 30)
-                       reg = 0x06060606;
-               else
-                       reg = 0x07070707;
-               IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
-       }
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_config_82599 - Configure general DCB parameters
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure general DCB parameters.
- */
-s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
-{
-       u32 reg;
-       u32 q;
-
-       /* Disable the Tx desc arbiter so that MTQC can be changed */
-       reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-       reg |= IXGBE_RTTDCS_ARBDIS;
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-       /* Enable DCB for Rx with 8 TCs */
-       reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
-       switch (reg & IXGBE_MRQC_MRQE_MASK) {
-       case 0:
-       case IXGBE_MRQC_RT4TCEN:
-               /* RSS disabled cases */
-               reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
-               break;
-       case IXGBE_MRQC_RSSEN:
-       case IXGBE_MRQC_RTRSS4TCEN:
-               /* RSS enabled cases */
-               reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
-               break;
-       default:
-               /* Unsupported value, assume stale data, overwrite no RSS */
-               reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
-       }
-       IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
-
-       /* Enable DCB for Tx with 8 TCs */
-       reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-       IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
-
-       /* Disable drop for all queues */
-       for (q=0; q < 128; q++) {
-               IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);
-       }
-
-       /* Enable the Tx desc arbiter */
-       reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-       reg &= ~IXGBE_RTTDCS_ARBDIS;
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-       return 0;
-}
-
-/**
- * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure dcb settings and enable dcb mode.
- */
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
-{
-       u32  pap = 0;
-
-       ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
-       ixgbe_dcb_config_82599(hw);
-       ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_pfc_82599(hw, dcb_config);
-       ixgbe_dcb_config_tc_stats_82599(hw);
-
-       /*
-        * TODO: For DCB SV purpose only,
-        * remove it before product release
-        */
-       if (dcb_config->link_speed > 0 && dcb_config->link_speed <= 9) {
-               pap = IXGBE_READ_REG(hw, IXGBE_PAP);
-               pap |= (dcb_config->link_speed << 16);
-               IXGBE_WRITE_REG(hw, IXGBE_PAP, pap);
-       }
-
-       return 0;
-}
-
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
deleted file mode 100644
index 00cf7da..0000000
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*******************************************************************************
-
-  Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@xxxxxxxxxxxxxxxxxxxxx>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _DCB_82599_CONFIG_H_
-#define _DCB_82599_CONFIG_H_
-
-/* DCB register definitions */
-#define IXGBE_RTTDCS_TDPAC      0x00000001 /* 0 Round Robin,
-                                            * 1 WSP - Weighted Strict Priority
-                                            */
-#define IXGBE_RTTDCS_VMPAC      0x00000002 /* 0 Round Robin,
-                                            * 1 WRR - Weighted Round Robin
-                                            */
-#define IXGBE_RTTDCS_TDRM       0x00000010 /* Transmit Recycle Mode */
-#define IXGBE_RTTDCS_BDPM       0x00400000 /* Bypass Data Pipe - must clear! */
-#define IXGBE_RTTDCS_BPBFSM     0x00800000 /* Bypass PB Free Space - must
-                                             * clear!
-                                             */
-#define IXGBE_RTTDCS_SPEED_CHG  0x80000000 /* Link speed change */
-
-/* Receive UP2TC mapping */
-#define IXGBE_RTRUP2TC_UP_SHIFT 3
-/* Transmit UP2TC mapping */
-#define IXGBE_RTTUP2TC_UP_SHIFT 3
-
-#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
-#define IXGBE_RTRPT4C_BWG_SHIFT 9  /* Offset to BWG index */
-#define IXGBE_RTRPT4C_GSP       0x40000000 /* GSP enable bit */
-#define IXGBE_RTRPT4C_LSP       0x80000000 /* LSP enable bit */
-
-#define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet
-                                            * buffers enable
-                                            */
-#define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores
-                                            * (RSS) enable
-                                            */
-
-/* RTRPCS Bit Masks */
-#define IXGBE_RTRPCS_RRM        0x00000002 /* Receive Recycle Mode enable */
-/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RTRPCS_RAC        0x00000004
-#define IXGBE_RTRPCS_ARBDIS     0x00000040 /* Arbitration disable bit */
-
-/* RTTDT2C Bit Masks */
-#define IXGBE_RTTDT2C_MCL_SHIFT 12
-#define IXGBE_RTTDT2C_BWG_SHIFT 9
-#define IXGBE_RTTDT2C_GSP       0x40000000
-#define IXGBE_RTTDT2C_LSP       0x80000000
-
-#define IXGBE_RTTPT2C_MCL_SHIFT 12
-#define IXGBE_RTTPT2C_BWG_SHIFT 9
-#define IXGBE_RTTPT2C_GSP       0x40000000
-#define IXGBE_RTTPT2C_LSP       0x80000000
-
-/* RTTPCS Bit Masks */
-#define IXGBE_RTTPCS_TPPAC      0x00000020 /* 0 Round Robin,
-                                            * 1 SP - Strict Priority
-                                            */
-#define IXGBE_RTTPCS_ARBDIS     0x00000040 /* Arbiter disable */
-#define IXGBE_RTTPCS_TPRM       0x00000100 /* Transmit Recycle Mode enable */
-#define IXGBE_RTTPCS_ARBD_SHIFT 22
-#define IXGBE_RTTPCS_ARBD_DCB   0x4        /* Arbitration delay in DCB mode */
-
-#define IXGBE_TXPBSIZE_20KB     0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB    0x00020000 /* 128KB Packet Buffer */
-
-#define IXGBE_TXPBTHRESH_DCB    0xA        /* THRESH value for DCB mode */
-
-
-/* DCB hardware-specific driver APIs */
-
-/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config);
-s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
-                                  struct ixgbe_hw_stats *stats,
-                                  u8 tc_count);
-
-/* DCB traffic class stats */
-s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw);
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
-                                 struct ixgbe_hw_stats *stats,
-                                 u8 tc_count);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config);
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config);
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config);
-
-/* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *config);
-
-#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 043045d..f275114 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -27,31 +27,19 @@
 
 #include "ixgbe.h"
 
-#ifdef CONFIG_DCB
-#include <linux/dcbnl.h>
-#include "ixgbe_dcb_82598.h"
-#include "ixgbe_dcb_82599.h"
-#else
 #include <linux/netlink.h>
 #include <linux/genetlink.h>
 #include <net/genetlink.h>
 #include <linux/netdevice.h>
-#endif
 
 /* Callbacks for DCB netlink in the kernel */
 #define BIT_DCB_MODE    0x01
 #define BIT_PFC         0x02
 #define BIT_PG_RX       0x04
 #define BIT_PG_TX       0x08
-#define BIT_RESETLINK   0x40
+#define BIT_BCN         0x10
 #define BIT_LINKSPEED   0x80
 
-/* Responses for the DCB_C_SET_ALL command */
-#define DCB_HW_CHG_RST  0  /* DCB configuration changed with reset */
-#define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
-#define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
-
-#ifndef CONFIG_DCB
 /* DCB configuration commands */
 enum {
        DCB_C_UNDEFINED,
@@ -267,66 +255,7 @@ static int ixgbe_dcb_check_adapter(struct net_device *netdev)
        else
                return -EINVAL;
 }
-#endif
-
-#ifdef CONFIG_DCB
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                      struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
-{
-       struct tc_configuration *src_tc_cfg = NULL;
-       struct tc_configuration *dst_tc_cfg = NULL;
-       int i;
-
-       if (!src_dcb_cfg || !dst_dcb_cfg)
-               return -EINVAL;
-
-       for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
-               src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
-               dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
-                               src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
-
-               dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
-                               src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
-
-               dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
-                               src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
-
-               dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
-                               src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
-
-               dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
-                               src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
-
-               dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
-                               src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
-
-               dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
-                               src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
-
-               dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
-                               src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
-       }
-
-       for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
-               dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
-                       [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-                               [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
-               dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
-                       [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-                               [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
-       }
-
-       for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
-               dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
-                       src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
-       }
-       dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
-
-       return 0;
-}
-#else
 static int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
                              struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
 {
@@ -413,90 +342,7 @@ err:
        kfree(dcb_skb);
        return -EINVAL;
 }
-#endif
-
-#ifdef CONFIG_DCB
-static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
-
-       return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
-}
-
-static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
-{
-       u8 err = 0;
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
 
-       if (state > 0) {
-               /* Turn on DCB */
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-                       goto out;
-
-               if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-                       DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n");
-                       err = 1;
-                       goto out;
-               }
-
-               if (netif_running(netdev))
-#ifdef HAVE_NET_DEVICE_OPS
-                       netdev->netdev_ops->ndo_stop(netdev);
-#else
-                       netdev->stop(netdev);
-#endif
-               ixgbe_clear_interrupt_scheme(adapter);
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       adapter->last_lfc_mode = adapter->hw.fc.current_mode;
-                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
-               }
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       DPRINTK(DRV, INFO, "DCB enabled, "
-                               "disabling Flow Director\n");
-                       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                       adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               }
-               adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
-               ixgbe_init_interrupt_scheme(adapter);
-               if (netif_running(netdev))
-#ifdef HAVE_NET_DEVICE_OPS
-                       netdev->netdev_ops->ndo_open(netdev);
-#else
-                       netdev->open(netdev);
-#endif
-       } else {
-               /* Turn off DCB */
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       if (netif_running(netdev))
-#ifdef HAVE_NET_DEVICE_OPS
-                               netdev->netdev_ops->ndo_stop(netdev);
-#else
-                               netdev->stop(netdev);
-#endif
-                       ixgbe_clear_interrupt_scheme(adapter);
-                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-                       adapter->temp_dcb_cfg.pfc_mode_enable = false;
-                       adapter->dcb_cfg.pfc_mode_enable = false;
-                       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-                       adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
-                       ixgbe_init_interrupt_scheme(adapter);
-                       if (netif_running(netdev))
-#ifdef HAVE_NET_DEVICE_OPS
-                               netdev->netdev_ops->ndo_open(netdev);
-#else
-                               netdev->open(netdev);
-#endif
-               }
-       }
-out:
-       return err;
-}
-#else
 static int ixgbe_dcb_gstate(struct sk_buff *skb, struct genl_info *info)
 {
        int ret = -ENOMEM;
@@ -529,6 +375,9 @@ err_out:
        return ret;
 }
 
+extern void ixgbe_napi_add_all(struct ixgbe_adapter *);
+extern void ixgbe_napi_del_all(struct ixgbe_adapter *);
+
 static int ixgbe_dcb_sstate(struct sk_buff *skb, struct genl_info *info)
 {
        struct net_device *netdev = NULL;
@@ -558,25 +407,27 @@ static int ixgbe_dcb_sstate(struct sk_buff *skb, struct genl_info *info)
                case 0:
                        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                                if (netdev->flags & IFF_UP)
-#ifdef HAVE_NET_DEVICE_OPS
-                                       netdev->netdev_ops->ndo_stop(netdev);
-#else
                                        netdev->stop(netdev);
+                               ixgbe_reset_interrupt_capability(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+                               ixgbe_napi_del_all(adapter);
 #endif
-                               ixgbe_clear_interrupt_scheme(adapter);
+                               kfree(adapter->tx_ring);
+                               kfree(adapter->rx_ring);
+                               adapter->tx_ring = NULL;
+                               adapter->rx_ring = NULL;
 
                                adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
                                if (adapter->flags & IXGBE_FLAG_RSS_CAPABLE)
                                        adapter->flags |=
                                                         IXGBE_FLAG_RSS_ENABLED;
                                ixgbe_init_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+                               ixgbe_napi_add_all(adapter);
+#endif
                                ixgbe_reset(adapter);
                                if (netdev->flags & IFF_UP)
-#ifdef HAVE_NET_DEVICE_OPS
-                                       netdev->netdev_ops->ndo_open(netdev);
-#else
                                        netdev->open(netdev);
-#endif
                                break;
                        } else {
                                /* Nothing to do, already off */
@@ -593,37 +444,26 @@ static int ixgbe_dcb_sstate(struct sk_buff *skb, struct genl_info *info)
                                goto err_out;
                        } else {
                                if (netdev->flags & IFF_UP)
-#ifdef HAVE_NET_DEVICE_OPS
-                                       netdev->netdev_ops->ndo_stop(netdev);
-#else
                                        netdev->stop(netdev);
+                               ixgbe_reset_interrupt_capability(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+                               ixgbe_napi_del_all(adapter);
 #endif
-                               ixgbe_clear_interrupt_scheme(adapter);
+                               kfree(adapter->tx_ring);
+                               kfree(adapter->rx_ring);
+                               adapter->tx_ring = NULL;
+                               adapter->rx_ring = NULL;
 
                                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
                                adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
-                               adapter->dcb_cfg.support.capabilities =
-                                (IXGBE_DCB_PG_SUPPORT | IXGBE_DCB_PFC_SUPPORT |
-                                 IXGBE_DCB_GSP_SUPPORT);
-                               if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                                       DPRINTK(DRV, INFO, "DCB enabled, "
-                                               "disabling Flow Director\n");
-                                       adapter->flags &=
-                                                 ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                                       adapter->flags &=
-                                              ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-                                       adapter->dcb_cfg.support.capabilities |=
-                                                       IXGBE_DCB_UP2TC_SUPPORT;
-                               }
                                adapter->ring_feature[RING_F_DCB].indices = 8;
                                ixgbe_init_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+                               ixgbe_napi_add_all(adapter);
+#endif
                                ixgbe_reset(adapter);
                                if (netdev->flags & IFF_UP)
-#ifdef HAVE_NET_DEVICE_OPS
-                                       netdev->netdev_ops->ndo_open(netdev);
-#else
                                        netdev->open(netdev);
-#endif
                                break;
                        }
                }
@@ -721,24 +561,7 @@ err_out:
 err:
        return ret;
 }
-#endif
-
-#ifdef CONFIG_DCB
-static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
-                                        u8 *perm_addr)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i, j;
-
-       for (i = 0; i < netdev->addr_len; i++)
-               perm_addr[i] = adapter->hw.mac.perm_addr[i];
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               for (j = 0; j < netdev->addr_len; j++, i++)
-                       perm_addr[i] = adapter->hw.mac.san_addr[j];
-       }
-}
-#else
 static int ixgbe_dcb_gperm_hwaddr(struct sk_buff *skb, struct genl_info *info)
 {
        void *data;
@@ -816,137 +639,7 @@ err_out:
        dev_put(netdev);
        return ret;
 }
-#endif
-
-#ifdef CONFIG_DCB
-static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
-                                        u8 prio, u8 bwg_id, u8 bw_pct,
-                                        u8 up_map)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       if (prio != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
-       if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
-       if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
-                       bw_pct;
-       if (up_map != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
-                       up_map;
-
-       if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
-            adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
-            adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
-            adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
-               adapter->dcb_set_bitmap |= BIT_PG_TX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
-}
-
-static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
-                                         u8 bw_pct)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
-
-       if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
-}
-
-static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
-                                        u8 prio, u8 bwg_id, u8 bw_pct,
-                                        u8 up_map)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       if (prio != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
-       if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
-       if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
-                       bw_pct;
-       if (up_map != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
-                       up_map;
-
-       if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
-            adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
-            adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
-            adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
-}
-
-static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
-                                         u8 bw_pct)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
-
-       if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
-}
 
-static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
-                                        u8 *prio, u8 *bwg_id, u8 *bw_pct,
-                                        u8 *up_map)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
-       *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
-       *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
-       *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
-}
-
-static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
-                                         u8 *bw_pct)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
-}
-
-static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
-                                        u8 *prio, u8 *bwg_id, u8 *bw_pct,
-                                        u8 *up_map)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
-       *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
-       *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
-       *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
-}
-
-static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
-                                         u8 *bw_pct)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
-}
-#else
 static int ixgbe_dcb_pg_scfg(struct sk_buff *skb, struct genl_info *info,
                                int dir)
 {
@@ -1045,7 +738,6 @@ static int ixgbe_dcb_pg_scfg(struct sk_buff *skb, struct genl_info *info,
                        adapter->dcb_set_bitmap |= BIT_PG_TX;
                else
                        adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
 
                DPRINTK(DRV, INFO, "Set DCB PG\n");
        } else {
@@ -1215,29 +907,7 @@ static int ixgbe_dcb_pgrx_gcfg(struct sk_buff *skb, struct genl_info *info)
 {
        return ixgbe_dcb_pg_gcfg(skb, info, DCB_RX_CONFIG);
 }
-#endif
-
-#ifdef CONFIG_DCB
-static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
-                                   u8 setting)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
-       if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-           adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
-               adapter->dcb_set_bitmap |= BIT_PFC;
-       }
-}
 
-static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
-                                   u8 *setting)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
-}
-#else
 static int ixgbe_dcb_spfccfg(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1];
@@ -1381,70 +1051,7 @@ err_out:
        dev_put(netdev);
        return ret;
 }
-#endif
-
-#ifdef CONFIG_DCB
-static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int ret;
-
-       if (!adapter->dcb_set_bitmap)
-               return DCB_NO_HW_CHG;
-
-       /* Only take down the adapter if the configuration change
-        * requires a reset.
-       */
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
-               while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
-                       msleep(1);
 
-               if (netif_running(netdev))
-                       ixgbe_down(adapter);
-       }
-
-       ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-                                adapter->ring_feature[RING_F_DCB].indices);
-       if (ret) {
-               if (adapter->dcb_set_bitmap & BIT_RESETLINK)
-                       clear_bit(__IXGBE_RESETTING, &adapter->state);
-               return DCB_NO_HW_CHG;
-       }
-
-       if (adapter->dcb_cfg.pfc_mode_enable) {
-               if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
-                       (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
-                       adapter->last_lfc_mode = adapter->hw.fc.current_mode;
-               adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
-       } else {
-               if (adapter->hw.mac.type != ixgbe_mac_82598EB)
-                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-               else
-                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
-       }
-
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
-               if (netif_running(netdev))
-                       ixgbe_up(adapter);
-               ret = DCB_HW_CHG_RST;
-       } else if (adapter->dcb_set_bitmap & BIT_PFC) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-                       ixgbe_dcb_config_pfc_82598(&adapter->hw,
-                               &adapter->dcb_cfg);
-               else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       ixgbe_dcb_config_pfc_82599(&adapter->hw,
-                               &adapter->dcb_cfg);
-               ret = DCB_HW_CHG;
-       }
-       if (adapter->dcb_cfg.pfc_mode_enable)
-               adapter->hw.fc.current_mode = ixgbe_fc_pfc;
-
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK)
-               clear_bit(__IXGBE_RESETTING, &adapter->state);
-       adapter->dcb_set_bitmap = 0x00;
-       return ret;
-}
-#else
 static int ixgbe_dcb_set_all(struct sk_buff *skb, struct genl_info *info)
 {
        struct net_device *netdev = NULL;
@@ -1510,119 +1117,8 @@ err_out:
 err:
        return ret;
 }
-#endif
 
-#ifdef CONFIG_DCB
-static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u8 rval = 0;
-
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               switch (capid) {
-               case DCB_CAP_ATTR_PG:
-                       *cap = true;
-                       break;
-               case DCB_CAP_ATTR_PFC:
-                       *cap = true;
-                       break;
-               case DCB_CAP_ATTR_UP2TC:
-                       *cap = false;
-                       break;
-               case DCB_CAP_ATTR_PG_TCS:
-                       *cap = 0x80;
-                       break;
-               case DCB_CAP_ATTR_PFC_TCS:
-                       *cap = 0x80;
-                       break;
-               case DCB_CAP_ATTR_GSP:
-                       *cap = true;
-                       break;
-               default:
-                       rval = -EINVAL;
-                       break;
-               }
-       } else {
-               rval = -EINVAL;
-       }
-
-       return rval;
-}
 
-static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u8 rval = 0;
-
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               switch (tcid) {
-               case DCB_NUMTCS_ATTR_PG:
-                       *num = MAX_TRAFFIC_CLASS;
-                       break;
-               case DCB_NUMTCS_ATTR_PFC:
-                       *num = MAX_TRAFFIC_CLASS;
-                       break;
-               default:
-                       rval = -EINVAL;
-                       break;
-               }
-       } else {
-               rval = -EINVAL;
-       }
-
-       return rval;
-}
-
-static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
-{
-       return -EINVAL;
-}
-
-static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       return adapter->dcb_cfg.pfc_mode_enable;
-}
-
-static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       DPRINTK(DRV, INFO, "Setting PFC state to %d.\n", state);
-       adapter->temp_dcb_cfg.pfc_mode_enable = state;
-       if (adapter->temp_dcb_cfg.pfc_mode_enable != 
-               adapter->dcb_cfg.pfc_mode_enable)
-               adapter->dcb_set_bitmap |= BIT_PFC;
-       return;
-}
-
-#else
-#endif
-
-#ifdef CONFIG_DCB
-struct dcbnl_rtnl_ops dcbnl_ops = {
-       .getstate       = ixgbe_dcbnl_get_state,
-       .setstate       = ixgbe_dcbnl_set_state,
-       .getpermhwaddr  = ixgbe_dcbnl_get_perm_hw_addr,
-       .setpgtccfgtx   = ixgbe_dcbnl_set_pg_tc_cfg_tx,
-       .setpgbwgcfgtx  = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
-       .setpgtccfgrx   = ixgbe_dcbnl_set_pg_tc_cfg_rx,
-       .setpgbwgcfgrx  = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
-       .getpgtccfgtx   = ixgbe_dcbnl_get_pg_tc_cfg_tx,
-       .getpgbwgcfgtx  = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
-       .getpgtccfgrx   = ixgbe_dcbnl_get_pg_tc_cfg_rx,
-       .getpgbwgcfgrx  = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
-       .setpfccfg      = ixgbe_dcbnl_set_pfc_cfg,
-       .getpfccfg      = ixgbe_dcbnl_get_pfc_cfg,
-       .setall         = ixgbe_dcbnl_set_all,
-       .getcap         = ixgbe_dcbnl_getcap,
-       .getnumtcs      = ixgbe_dcbnl_getnumtcs,
-       .setnumtcs      = ixgbe_dcbnl_setnumtcs,
-       .getpfcstate    = ixgbe_dcbnl_getpfcstate,
-       .setpfcstate    = ixgbe_dcbnl_setpfcstate,
-};
-#else
 /* DCB Generic NETLINK command Definitions */
 /* Get DCB Admin Mode */
 static struct genl_ops ixgbe_dcb_genl_c_gstate = {
@@ -1817,4 +1313,3 @@ int ixgbe_dcb_netlink_unregister(void)
 {
        return genl_unregister_family(&dcb_family);
 }
-#endif
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 26d6f83..e9763dd 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -101,23 +101,20 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
        {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
        {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
        {"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
+       {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
 #ifndef IXGBE_NO_LLI
        {"low_latency_interrupt", IXGBE_STAT(lli_int)},
 #endif
        {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
        {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
 #ifndef IXGBE_NO_LRO
-       {"lro_aggregated", IXGBE_STAT(lro_stats.coal)},
-       {"lro_flushed", IXGBE_STAT(lro_stats.flushed)},
-       {"lro_recycled", IXGBE_STAT(lro_stats.recycled)},
+       {"lro_aggregated", IXGBE_STAT(lro_data.stats.coal)},
+       {"lro_flushed", IXGBE_STAT(lro_data.stats.flushed)},
 #endif /* IXGBE_NO_LRO */
-       {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
-#ifndef IXGBE_NO_HW_RSC
-       {"hw_rsc_count", IXGBE_STAT(rsc_count)},
+#ifndef IXGBE_NO_INET_LRO
+       {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
+       {"lro_flushed", IXGBE_STAT(lro_flushed)},
 #endif
-       {"rx_flm", IXGBE_STAT(flm)},
-       {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
-       {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
 };
 
 #define IXGBE_QUEUE_STATS_LEN \
@@ -155,55 +152,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
        ecmd->supported = SUPPORTED_10000baseT_Full;
        ecmd->autoneg = AUTONEG_ENABLE;
        ecmd->transceiver = XCVR_EXTERNAL;
-       if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-           (hw->mac.type == ixgbe_mac_82599EB)) {
+       if (hw->phy.media_type == ixgbe_media_type_copper) {
                ecmd->supported |= (SUPPORTED_1000baseT_Full |
-                                   SUPPORTED_Autoneg);
+                                   SUPPORTED_TP | SUPPORTED_Autoneg);
 
-               ecmd->advertising = ADVERTISED_Autoneg;
+               ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               /*
-                * It's possible that phy.autoneg_advertised may not be
-                * set yet.  If so display what the default would be -
-                * both 1G and 10G supported.
-                */
-               if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
-                                          ADVERTISED_10000baseT_Full)))
-                       ecmd->advertising |= (ADVERTISED_10000baseT_Full |
-                                             ADVERTISED_1000baseT_Full);
-
-               if (hw->phy.media_type == ixgbe_media_type_copper) {
-                       ecmd->supported |= SUPPORTED_TP;
-                       ecmd->advertising |= ADVERTISED_TP;
-                       ecmd->port = PORT_TP;
-               } else {
-                       ecmd->supported |= SUPPORTED_FIBRE;
-                       ecmd->advertising |= ADVERTISED_FIBRE;
-                       ecmd->port = PORT_FIBRE;
-               }
-       } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
-               /* Set as FIBRE until SERDES defined in kernel */
-               switch (hw->device_id) {
-               case IXGBE_DEV_ID_82598:
-                       ecmd->supported |= (SUPPORTED_1000baseT_Full |
-                                           SUPPORTED_FIBRE);
-                       ecmd->advertising = (ADVERTISED_10000baseT_Full |
-                                            ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_FIBRE);
-                       ecmd->port = PORT_FIBRE;
-                       break;
-               case IXGBE_DEV_ID_82598_BX:
-                       ecmd->supported = (SUPPORTED_1000baseT_Full |
-                                          SUPPORTED_FIBRE);
-                       ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_FIBRE);
-                       ecmd->port = PORT_FIBRE;
-                       ecmd->autoneg = AUTONEG_DISABLE;
-                       break;
-       }
+
+               ecmd->port = PORT_TP;
        } else {
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising = (ADVERTISED_10000baseT_Full |
@@ -241,10 +200,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 advertised, old;
-       s32 err = 0;
+       s32 err;
 
-       if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-           (hw->mac.type == ixgbe_mac_82599EB)) {
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+               if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+                   (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+                       return -EINVAL;
+               /* in this case we currently only support 10Gb/FULL */
+               break;
+       case ixgbe_media_type_copper:
                /* 10000/copper and 1000/copper must autoneg
                 * this function does not support any duplex forcing, but can
                 * limit the advertising of the adapter to only 10000 or 1000 */
@@ -260,23 +225,20 @@ static int ixgbe_set_settings(struct net_device *netdev,
                        advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
                if (old == advertised)
-                       return err;
+                       break;
                /* this sets the link speed and restarts auto-neg */
-               hw->mac.autotry_restart = true;
                err = hw->mac.ops.setup_link_speed(hw, advertised, true, true);
                if (err) {
                        DPRINTK(PROBE, INFO,
                                "setup link failed with code %d\n", err);
                        hw->mac.ops.setup_link_speed(hw, old, true, true);
                }
-       } else {
-               /* in this case we currently only support 10Gb/FULL */
-               if ((ecmd->autoneg == AUTONEG_ENABLE) ||
-                   (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
-                       return -EINVAL;
+               break;
+       default:
+               break;
        }
 
-       return err;
+       return 0;
 }
 
 static void ixgbe_get_pauseparam(struct net_device *netdev,
@@ -285,23 +247,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       /*
-        * Flow Control Autoneg isn't on if
-        *  - we didn't ask for it OR
-        *  - it failed, we know this by tx & rx being off
-        */
-       if (hw->fc.disable_fc_autoneg || (hw->fc.current_mode == ixgbe_fc_none))
-               pause->autoneg = 0;
-       else
-               pause->autoneg = 1;
-
-#ifdef CONFIG_DCB
-       if (hw->fc.current_mode == ixgbe_fc_pfc) {
-               pause->rx_pause = 0;
-               pause->tx_pause = 0;
-               return;
-       }
-#endif
+       pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
 
        if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
                pause->rx_pause = 1;
@@ -318,41 +264,25 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_fc_info fc;
-
-       if (adapter->dcb_cfg.pfc_mode_enable ||
-               ((hw->mac.type == ixgbe_mac_82598EB) &&
-               (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
-               return -EINVAL;
-
-       fc = hw->fc;
 
-       if (pause->autoneg != AUTONEG_ENABLE)
-               fc.disable_fc_autoneg = true;
-       else
-               fc.disable_fc_autoneg = false;
-
-       if (pause->rx_pause && pause->tx_pause)
-               fc.requested_mode = ixgbe_fc_full;
+       if ((pause->autoneg == AUTONEG_ENABLE) ||
+           (pause->rx_pause && pause->tx_pause))
+               hw->fc.current_mode = ixgbe_fc_full;
        else if (pause->rx_pause && !pause->tx_pause)
-               fc.requested_mode = ixgbe_fc_rx_pause;
+               hw->fc.current_mode = ixgbe_fc_rx_pause;
        else if (!pause->rx_pause && pause->tx_pause)
-               fc.requested_mode = ixgbe_fc_tx_pause;
+               hw->fc.current_mode = ixgbe_fc_tx_pause;
        else if (!pause->rx_pause && !pause->tx_pause)
-               fc.requested_mode = ixgbe_fc_none;
+               hw->fc.current_mode = ixgbe_fc_none;
        else
                return -EINVAL;
 
-       adapter->last_lfc_mode = fc.requested_mode;
+       hw->fc.requested_mode = hw->fc.current_mode;
 
-       /* if the thing changed then we'll update and use new autoneg */
-       if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
-               hw->fc = fc;
-               if (netif_running(netdev))
-                       ixgbe_reinit_locked(adapter);
-               else
-                       ixgbe_reset(adapter);
-       }
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
+       else
+               ixgbe_reset(adapter);
 
        return 0;
 }
@@ -521,15 +451,9 @@ static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
        regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
        regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
        for (i = 0; i < 8; i++)
-               if (hw->mac.type == ixgbe_mac_82599EB)
-                       regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
-               else
-                       regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+               regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
        for (i = 0; i < 8; i++)
-               if (hw->mac.type == ixgbe_mac_82599EB)
-                       regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
-               else
-                       regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+               regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
        regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
        regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
 
@@ -870,17 +794,10 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
 
        strncpy(drvinfo->driver, ixgbe_driver_name, 32);
        strncpy(drvinfo->version, ixgbe_driver_version, 32);
-
-       sprintf(firmware_version, "%d.%d-%d",
-               (adapter->eeprom_version & 0xF000) >> 12,
-               (adapter->eeprom_version & 0x0FF0) >> 4,
-               adapter->eeprom_version & 0x000F);
-
-       strncpy(drvinfo->fw_version, firmware_version, 32);
+       strncpy(drvinfo->fw_version, "N/A", 32);
        strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
        drvinfo->n_stats = IXGBE_STATS_LEN;
        drvinfo->testinfo_len = IXGBE_TEST_LEN;
@@ -908,10 +825,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                                struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+       struct ixgbe_ring *temp_ring;
        int i, err;
        u32 new_rx_count, new_tx_count;
-       bool need_update = false;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
@@ -930,92 +846,84 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                return 0;
        }
 
+       if (adapter->num_tx_queues > adapter->num_rx_queues)
+               temp_ring = vmalloc(adapter->num_tx_queues *
+                                   sizeof(struct ixgbe_ring));
+       else
+               temp_ring = vmalloc(adapter->num_rx_queues *
+                                   sizeof(struct ixgbe_ring));
+       if (!temp_ring)
+               return -ENOMEM;
+
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                msleep(1);
 
-       temp_tx_ring = kcalloc(adapter->num_tx_queues,
-                              sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if (!temp_tx_ring) {
-               err = -ENOMEM;
-               goto err_setup;
-       }
+       if (netif_running(netdev))
+               ixgbe_down(adapter);
 
-       if (new_tx_count != adapter->tx_ring_count) {
-               memcpy(temp_tx_ring, adapter->tx_ring,
+       /*
+        * We can't just free everything and then setup again,
+        * because the ISRs in MSI-X mode get passed pointers
+        * to the tx and rx ring structs.
+        */
+       if (new_tx_count != adapter->tx_ring->count) {
+               memcpy(temp_ring, adapter->tx_ring,
                       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+
                for (i = 0; i < adapter->num_tx_queues; i++) {
-                       temp_tx_ring[i].count = new_tx_count;
-                       err = ixgbe_setup_tx_resources(adapter,
-                                                      &temp_tx_ring[i]);
+                       temp_ring[i].count = new_tx_count;
+                       err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbe_free_tx_resources(adapter,
-                                                               &temp_tx_ring[i]);
+                                                               &temp_ring[i]);
                                }
                                goto err_setup;
                        }
                }
-               need_update = true;
-       }
 
-       temp_rx_ring = kcalloc(adapter->num_rx_queues,
-                              sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if ((!temp_rx_ring) && (need_update)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
-               kfree(temp_tx_ring);
-               err = -ENOMEM;
-               goto err_setup;
+                       ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+
+               memcpy(adapter->tx_ring, temp_ring,
+                      adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+
+               adapter->tx_ring_count = new_tx_count;
        }
 
-       if (new_rx_count != adapter->rx_ring_count) {
-               memcpy(temp_rx_ring, adapter->rx_ring,
+       if (new_rx_count != adapter->rx_ring->count) {
+               memcpy(temp_ring, adapter->rx_ring,
                       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       temp_rx_ring[i].count = new_rx_count;
-                       err = ixgbe_setup_rx_resources(adapter,
-                                                      &temp_rx_ring[i]);
+                       temp_ring[i].count = new_rx_count;
+                       err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbe_free_rx_resources(adapter,
-                                                             &temp_rx_ring[i]);
+                                                               &temp_ring[i]);
                                }
                                goto err_setup;
                        }
                }
-               need_update = true;
-       }
 
-       /* if rings need to be updated, here's the place to do it in one shot */
-       if (need_update) {
-               if (netif_running(netdev))
-                       ixgbe_down(adapter);
-
-               /* tx */
-               if (new_tx_count != adapter->tx_ring_count) {
-                       kfree(adapter->tx_ring);
-                       adapter->tx_ring = temp_tx_ring;
-                       temp_tx_ring = NULL;
-                       adapter->tx_ring_count = new_tx_count;
-               }
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
 
-               /* rx */
-               if (new_rx_count != adapter->rx_ring_count) {
-                       kfree(adapter->rx_ring);
-                       adapter->rx_ring = temp_rx_ring;
-                       temp_rx_ring = NULL;
-                       adapter->rx_ring_count = new_rx_count;
-               }
+               memcpy(adapter->rx_ring, temp_ring,
+                      adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+
+               adapter->rx_ring_count = new_rx_count;
        }
 
        /* success! */
        err = 0;
+err_setup:
        if (netif_running(netdev))
                ixgbe_up(adapter);
 
-err_setup:
        clear_bit(__IXGBE_RESETTING, &adapter->state);
        return err;
 }
@@ -1034,6 +942,19 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
        int j, k;
        int i;
 
+#ifndef IXGBE_NO_INET_LRO
+       unsigned int aggregated = 0, flushed = 0, no_desc = 0;
+
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
+               flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
+               no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
+       }
+       adapter->lro_aggregated = aggregated;
+       adapter->lro_flushed = flushed;
+       adapter->lro_no_desc = no_desc;
+
+#endif
        ixgbe_update_stats(adapter);
        for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
                char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
@@ -1154,55 +1075,31 @@ struct ixgbe_reg_test {
 #define TABLE64_TEST_LO        5
 #define TABLE64_TEST_HI        6
 
-/* default 82599 register test */
-static struct ixgbe_reg_test reg_test_82599[] = {
-       { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
-       { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
-       { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
-       { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
-       { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
-       { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
-       { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-       { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
-       { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
-       { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
-       { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
-       { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
-       { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
-       { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { 0, 0, 0, 0 }
-};
-
-/* default 82598 register test */
+/* default register test */
 static struct ixgbe_reg_test reg_test_82598[] = {
-       { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
-       { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
-       { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
-       { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
-       { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+       { IXGBE_FCRTL(0),       1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+       { IXGBE_FCRTH(0),       1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+       { IXGBE_PFCTOP,         1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { IXGBE_VLNCTRL,        1, PATTERN_TEST, 0x00000000, 0x00000000 },
+       { IXGBE_RDBAL(0),       4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+       { IXGBE_RDBAH(0),       4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { IXGBE_RDLEN(0),       4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* Enable all four RX queues before testing. */
-       { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+       { IXGBE_RXDCTL(0),      4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
        /* RDH is read-only for 82598, only test RDT. */
-       { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-       { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
-       { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
-       { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
-       { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
-       { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
-       { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
-       { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
-       { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
-       { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
-       { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { IXGBE_RDT(0),         4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+       { IXGBE_RXDCTL(0),      4, WRITE_NO_TEST, 0, 0 },
+       { IXGBE_FCRTH(0),       1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+       { IXGBE_FCTTV(0),       1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { IXGBE_TIPG,           1, PATTERN_TEST, 0x000000FF, 0x000000FF },
+       { IXGBE_TDBAL(0),       4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+       { IXGBE_TDBAH(0),       4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { IXGBE_TDLEN(0),       4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+       { IXGBE_RXCTRL,         1, SET_READ_TEST, 0x00000003, 0x00000003 },
+       { IXGBE_DTXCTL,         1, SET_READ_TEST, 0x00000005, 0x00000005 },
+       { IXGBE_RAL(0),         16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+       { IXGBE_RAL(0),         16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
+       { IXGBE_MTA(0),         128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
 };
 
@@ -1248,13 +1145,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
        u32 value, before, after;
        u32 i, toggle;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               toggle = 0x7FFFF30F;
-               test = reg_test_82599;
-       } else {
-               toggle = 0x7FFFF3FF;
-               test = reg_test_82598;
-       }
+       toggle = 0x7FFFF3FF;
+       test = reg_test_82598;
 
        /*
         * Because the status register is such a special case,
@@ -1452,42 +1344,16 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-       struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       u32 reg_ctl;
        int i;
 
-       /* shut down the DMA engines now so they can be reinitialized later */
-
-       /* first Rx */
-       reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       reg_ctl &= ~IXGBE_RXCTRL_RXEN;
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-       reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
-       reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
-
-       /* now Tx */
-       reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
-       reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
-       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-               reg_ctl &= ~IXGBE_DMATXCTL_TE;
-               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
-       }
-
-       ixgbe_reset(adapter);
-
        if (tx_ring->desc && tx_ring->tx_buffer_info) {
                for (i = 0; i < tx_ring->count; i++) {
                        struct ixgbe_tx_buffer *buf =
                                        &(tx_ring->tx_buffer_info[i]);
-                       if (buf->dma) {
+                       if (buf->dma)
                                pci_unmap_single(pdev, buf->dma, buf->length,
                                                 PCI_DMA_TODEVICE);
-                               buf->dma = 0;
-                       }
                        if (buf->skb)
                                dev_kfree_skb(buf->skb);
                }
@@ -1497,12 +1363,10 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
                for (i = 0; i < rx_ring->count; i++) {
                        struct ixgbe_rx_buffer *buf =
                                        &(rx_ring->rx_buffer_info[i]);
-                       if (buf->dma) {
+                       if (buf->dma)
                                pci_unmap_single(pdev, buf->dma,
                                                 IXGBE_RXBUFFER_2048,
                                                 PCI_DMA_FROMDEVICE);
-                               buf->dma = 0;
-                       }
                        if (buf->skb)
                                dev_kfree_skb(buf->skb);
                }
@@ -1570,11 +1434,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        reg_data |= IXGBE_HLREG0_TXPADEN;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
-               reg_data |= IXGBE_DMATXCTL_TE;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
-       }
        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
        reg_data |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
@@ -1658,17 +1517,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
        reg_data |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               int j = adapter->rx_ring[0].reg_idx;
-               u32 k;
-               for (k = 0; k < 10; k++) {
-                       if (IXGBE_READ_REG(&adapter->hw,
-                                          IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-                               break;
-                       else
-                               msleep(1);
-               }
-       }
 
        rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
@@ -1939,75 +1787,15 @@ static void ixgbe_diag_test(struct net_device *netdev,
        msleep_interruptible(4 * 1000);
 }
 
-static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
-                               struct ethtool_wolinfo *wol)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       int retval = 1;
-
-       switch(hw->device_id) {
-       case IXGBE_DEV_ID_82599_KX4:
-               retval = 0;
-               break;
-       default:
-               wol->supported = 0;
-               retval = 0;
-       }
-
-       return retval;
-}
-
 static void ixgbe_get_wol(struct net_device *netdev,
                           struct ethtool_wolinfo *wol)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       wol->supported = WAKE_UCAST | WAKE_MCAST |
-                        WAKE_BCAST | WAKE_MAGIC;
+       wol->supported = 0;
        wol->wolopts = 0;
 
-       if (ixgbe_wol_exclusion(adapter, wol) ||
-           !device_can_wakeup(&adapter->pdev->dev))
-               return;
-
-       if (adapter->wol & IXGBE_WUFC_EX)
-               wol->wolopts |= WAKE_UCAST;
-       if (adapter->wol & IXGBE_WUFC_MC)
-               wol->wolopts |= WAKE_MCAST;
-       if (adapter->wol & IXGBE_WUFC_BC)
-               wol->wolopts |= WAKE_BCAST;
-       if (adapter->wol & IXGBE_WUFC_MAG)
-               wol->wolopts |= WAKE_MAGIC;
-
        return;
 }
 
-static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
-               return -EOPNOTSUPP;
-
-       if (ixgbe_wol_exclusion(adapter, wol))
-               return wol->wolopts ? -EOPNOTSUPP : 0;
-
-       adapter->wol = 0;
-
-       if (wol->wolopts & WAKE_UCAST)
-               adapter->wol |= IXGBE_WUFC_EX;
-       if (wol->wolopts & WAKE_MCAST)
-               adapter->wol |= IXGBE_WUFC_MC;
-       if (wol->wolopts & WAKE_BCAST)
-               adapter->wol |= IXGBE_WUFC_BC;
-       if (wol->wolopts & WAKE_MAGIC)
-               adapter->wol |= IXGBE_WUFC_MAG;
-
-       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
-       return 0;
-}
-
 static int ixgbe_nway_reset(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2051,30 +1839,16 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 #endif
 
        /* only valid if in constant ITR mode */
-       switch (adapter->itr_setting) {
-       case 0:
-               /* throttling disabled */
-               ec->rx_coalesce_usecs = 0;
-               break;
-       case 1:
-               /* dynamic ITR mode */
-               ec->rx_coalesce_usecs = 1;
-               break;
-       default:
-               /* fixed interrupt rate mode */
+       if (adapter->itr_setting == 0)
                ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
-               break;
-       }
+
        return 0;
 }
 
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
-
 static int ixgbe_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i;
 
        if (ec->tx_max_coalesced_frames_irq)
                adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
@@ -2084,81 +1858,37 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
                adapter->rx_ring[0].work_limit = ec->rx_max_coalesced_frames_irq;
 
 #endif
-       if (ec->rx_coalesce_usecs > 1) {
-               /* check the limits */
-               if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
-                   (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
-                       return -EINVAL;
-
+       if (ec->rx_coalesce_usecs > 3) {
+               struct ixgbe_hw *hw = &adapter->hw;
+               int i;
                /* store the value in ints/second */
                adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
 
+               for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++){
+                       struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+                       if (q_vector->txr_count && !q_vector->rxr_count)
+                               q_vector->eitr = (adapter->eitr_param >> 1);
+                       else
+                               /* rx only */
+                               q_vector->eitr = adapter->eitr_param;
+                       IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
+                                     EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+               }
+
                /* static value of interrupt rate */
                adapter->itr_setting = adapter->eitr_param;
-               /* clear the lower bit as its used for dynamic state */
-               adapter->itr_setting &= ~1;
-       } else if (ec->rx_coalesce_usecs == 1) {
-               /* 1 means dynamic mode */
-               adapter->eitr_param = 20000;
-               adapter->itr_setting = 1;
        } else {
-               /*
-                * any other value means disable eitr, which is best
-                * served by setting the interrupt rate very high
-                */
-               adapter->eitr_param = IXGBE_MAX_INT_RATE;
-               adapter->itr_setting = 0;
+               /* 1,2,3 means dynamic mode */
+               adapter->itr_setting = ec->rx_coalesce_usecs;
        }
 
-       for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-               if (q_vector->txr_count && !q_vector->rxr_count)
-                       /* tx vector gets half the rate */
-                       q_vector->eitr = (adapter->eitr_param >> 1);
-               else if (q_vector->rxr_count)
-                       /* rx only or mixed */
-                       q_vector->eitr = adapter->eitr_param;
-               ixgbe_write_eitr(q_vector);
-       }
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
 
        return 0;
 }
 
-#ifdef ETHTOOL_GFLAGS
-static int ixgbe_set_flags(struct net_device *netdev, u32 data)
-{
-#if !defined(IXGBE_NO_HW_RSC) || !defined(IXGBE_NO_LRO)
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#endif
-       ethtool_op_set_flags(netdev, data);
-
-#ifndef IXGBE_NO_HW_RSC
-       /* if state changes we need to update adapter->flags and reset */
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-               /* cast both to bool and verify if they are set the same */
-               if ((!!(data & ETH_FLAG_LRO)) !=
-                   (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
-                       adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
-                       if (netif_running(netdev))
-                               ixgbe_reinit_locked(adapter);
-                       else
-                               ixgbe_reset(adapter);
-               }
-               return 0;
-       }
-#endif /* IXGBE_NO_HW_RSC */
-#ifndef IXGBE_NO_LRO
-       /* cast both to bool and verify if they are set the same */
-       if ((!!(data & ETH_FLAG_LRO)) != 
-           (!!(adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED)))
-               adapter->flags2 ^= IXGBE_FLAG2_SWLRO_ENABLED;
-
-#endif /* IXGBE_NO_LRO */
-       return 0;
-
-}
 
-#endif /* ETHTOOL_GFLAGS */
 static struct ethtool_ops ixgbe_ethtool_ops = {
        .get_settings           = ixgbe_get_settings,
        .set_settings           = ixgbe_set_settings,
@@ -2166,7 +1896,6 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
        .get_regs_len           = ixgbe_get_regs_len,
        .get_regs               = ixgbe_get_regs,
        .get_wol                = ixgbe_get_wol,
-       .set_wol                = ixgbe_set_wol,
        .nway_reset             = ixgbe_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = ixgbe_get_eeprom_len,
@@ -2199,9 +1928,9 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
 #endif
        .get_coalesce           = ixgbe_get_coalesce,
        .set_coalesce           = ixgbe_set_coalesce,
-#ifdef ETHTOOL_GFLAGS
+#ifndef IXGBE_NO_INET_LRO
        .get_flags              = ethtool_op_get_flags,
-       .set_flags              = ixgbe_set_flags,
+       .set_flags              = ethtool_op_set_flags,
 #endif
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6670774..ec2fe1a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
 /******************************************************************************
  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
 ******************************************************************************/
+
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -52,7 +53,6 @@
 #include <linux/if_vlan.h>
 #endif
 
-
 #include "ixgbe.h"
 
 char ixgbe_driver_name[] = "ixgbe";
@@ -66,11 +66,9 @@ static const char ixgbe_driver_string[] =
 #define DRIVERNAPI "-NAPI"
 #endif
 
-#define FPGA
-
-#define DRV_VERSION "2.0.34.3" DRIVERNAPI DRV_HW_PERF FPGA
+#define DRV_VERSION "1.3.56.5-vmq" DRIVERNAPI DRV_HW_PERF
 const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
 /* ixgbe_pci_tbl - PCI Device ID Table
  *
  * Wildcard entries (PCI_ANY_ID) should come last
@@ -81,7 +79,6 @@ static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
  */
 static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)},
@@ -91,9 +88,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)},
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)},
        /* required last entry */
        {0, }
 };
@@ -107,8 +101,8 @@ static struct notifier_block dca_notifier = {
        .next          = NULL,
        .priority      = 0
 };
-
 #endif
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@xxxxxxxxx>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -136,69 +130,17 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
-/*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
- * @adapter: pointer to adapter struct
- * @direction: 0 for Rx, 1 for Tx, -1 for other causes
- * @queue: queue to map the corresponding interrupt to
- * @msix_vector: the vector to map to the corresponding queue
- *
- */
-static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
-                          u8 queue, u8 msix_vector)
+static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
+                           u8 msix_vector)
 {
        u32 ivar, index;
-       struct ixgbe_hw *hw = &adapter->hw;
-       switch (hw->mac.type) {
-       case ixgbe_mac_82598EB:
-               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
-               if (direction == -1)
-                       direction = 0;
-               index = (((direction * 64) + queue) >> 2) & 0x1F;
-               ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
-               ivar &= ~(0xFF << (8 * (queue & 0x3)));
-               ivar |= (msix_vector << (8 * (queue & 0x3)));
-               IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
-               break;
-       case ixgbe_mac_82599EB:
-               if (direction == -1) {
-                       /* other causes */
-                       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
-                       index = ((queue & 1) * 8);
-                       ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
-                       ivar &= ~(0xFF << index);
-                       ivar |= (msix_vector << index);
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
-                       break;
-               } else {
-                       /* tx or rx causes */
-                       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
-                       index = ((16 * (queue & 1)) + (8 * direction));
-                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
-                       ivar &= ~(0xFF << index);
-                       ivar |= (msix_vector << index);
-                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
-                       break;
-               }
-       default:
-               break;
-       }
-}
 
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
-                                          u64 qmask)
-{
-       u32 mask;
-
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-       } else {
-               mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
-               mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
-       }
+       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+       index = (int_alloc_entry >> 2) & 0x1F;
+       ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
+       ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
+       ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
 }
 
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
@@ -267,38 +209,39 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 #define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
 #endif
 
+#define GET_TX_HEAD_FROM_RING(ring) (\
+       *(volatile u32 *) \
+       ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
 static void ixgbe_tx_timeout(struct net_device *netdev);
 
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
- * @q_vector: structure containing interrupt and ring information
+ * @adapter: board private structure
  * @tx_ring: tx ring to clean
  **/
-static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
 {
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
-       union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+       union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int i, eop, count = 0;
+       struct net_device *netdev = adapter->netdev;
+       struct sk_buff *skb;
+       unsigned int i;
+       u32 head, oldhead;
+       unsigned int count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
 
+       rmb();
+       head = GET_TX_HEAD_FROM_RING(tx_ring);
+       head = le32_to_cpu(head);
        i = tx_ring->next_to_clean;
-       eop = tx_ring->tx_buffer_info[i].next_to_watch;
-       eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-
-       while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-              (count < tx_ring->work_limit)) {
-               bool cleaned = false;
-               for ( ; !cleaned; count++) {
-                       struct sk_buff *skb;
+       while (1) {
+               while (i != head) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       cleaned = (i == eop);
                        skb = tx_buffer_info->skb;
 
-                       if (cleaned && skb) {
+                       if (skb) {
 #ifdef NETIF_F_TSO
                                unsigned int segs, bytecount;
 
@@ -318,17 +261,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);
 
-                       tx_desc->wb.status = 0;
-
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
-               }
-
-               eop = tx_ring->tx_buffer_info[i].next_to_watch;
-               eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-       }
 
+                       count++;
+                       if (count == tx_ring->count)
+                               goto done_cleaning;
+               }
+               oldhead = head;
+               rmb();
+               head = GET_TX_HEAD_FROM_RING(tx_ring);
+               head = le32_to_cpu(head);
+               if (head == oldhead)
+                       goto done_cleaning;
+       } /* while (1) */
+
+done_cleaning:
        tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -363,20 +312,18 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                }
        }
 
-#ifndef CONFIG_IXGBE_NAPI
        /* re-arm the interrupt */
-       if ((count >= tx_ring->work_limit) &&
-           (!test_bit(__IXGBE_DOWN, &adapter->state)))
-               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
+       if ((total_packets >= tx_ring->work_limit) ||
+           (count == tx_ring->count))
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
 
-#endif
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
-       return (count < tx_ring->work_limit);
+       return (total_packets ? true : false);
 }
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
@@ -386,34 +333,17 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;
-       struct ixgbe_hw *hw = &adapter->hw;
 
        if (rx_ring->cpu != cpu) {
-               rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(q));
-               if (hw->mac.type == ixgbe_mac_82598EB) {
-                       rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-                       rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-               } else if (hw->mac.type == ixgbe_mac_82599EB) {
-                       rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
-                       rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
-               }
+               rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
+               rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+               rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED_DATA) {
-                       /* just do the header data when in Packet Split mode */
-                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-                               rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
-                       else
-                               rxctrl |= IXGBE_DCA_RXCTRL_DATA_DCA_EN;
-               }
-               rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
-               rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-                           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(q), rxctrl);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
-       put_cpu_no_resched();
+       put_cpu();
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
@@ -422,23 +352,13 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;
-       struct ixgbe_hw *hw = &adapter->hw;
 
        if (tx_ring->cpu != cpu) {
-               if (hw->mac.type == ixgbe_mac_82598EB) {
-                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
-                       txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-                       txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
-               } else if (hw->mac.type == ixgbe_mac_82599EB) {
-                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
-                       txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
-                       txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
-                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
-               }
+               txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
+               txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+               txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
@@ -451,9 +371,6 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;
 
-       /* Always use CB2 mode, difference is masked in the CB driver. */
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
-
        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
@@ -475,6 +392,9 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
+               /* Always use CB2 mode, difference is masked
+                * in the CB driver. */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
@@ -496,35 +416,58 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 #endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
 /**
  * ixgbe_receive_skb - Send a completed packet up the stack
- * @q_vector: structure containing interrupt and ring information
+ * @adapter: board private structure
  * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
  **/
-static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
-                              struct sk_buff *skb, u16 vlan_tag)
+static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
+                              struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
+                              union ixgbe_adv_rx_desc *rx_desc)
 {
-       struct ixgbe_adapter *adapter = q_vector->adapter;
        int ret;
+       bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+       u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) && ring->queue_index) {
+               /* This is a VMDq packet destined for a VM. */
+               vmq_netif_rx(skb, ring->queue_index);
+               return;
+       }
+#endif
+#ifndef IXGBE_NO_INET_LRO
+       if (adapter->netdev->features & NETIF_F_LRO &&
+           skb->ip_summed == CHECKSUM_UNNECESSARY) {
+#ifdef NETIF_F_HW_VLAN_TX
+               if (adapter->vlgrp && is_vlan && (tag != 0))
+                       lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
+                                                    adapter->vlgrp, tag,
+                                                    rx_desc);
+               else
+#endif
+                       lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
+               ring->lro_used = true;
+       } else {
+#endif /* IXGBE_NO_INET_LRO */
 #ifdef CONFIG_IXGBE_NAPI
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
 #ifdef NETIF_F_HW_VLAN_TX
-                       if (adapter->vlgrp && vlan_tag)
-                               vlan_gro_receive(&q_vector->napi,
-                                                adapter->vlgrp,
-                                                vlan_tag, skb);
+                       if (adapter->vlgrp && is_vlan && (tag != 0))
+                               vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 
tag);
                        else
-                               napi_gro_receive(&q_vector->napi, skb);
+                               netif_receive_skb(skb);
 #else
-                       napi_gro_receive(&q_vector->napi, skb);
+                       netif_receive_skb(skb);
 #endif
                } else {
 #endif /* CONFIG_IXGBE_NAPI */
 
 #ifdef NETIF_F_HW_VLAN_TX
-                       if (adapter->vlgrp && vlan_tag)
-                               ret = vlan_hwaccel_rx(skb, adapter->vlgrp,
-                                                     vlan_tag);
+                       if (adapter->vlgrp && is_vlan && (tag != 0))
+                               ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                        else
                                ret = netif_rx(skb);
 #else
@@ -537,19 +480,20 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
 #ifdef CONFIG_IXGBE_NAPI
                }
 #endif /* CONFIG_IXGBE_NAPI */
+#ifndef IXGBE_NO_INET_LRO
+       }
+#endif
 }
 
 /**
  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
  * @adapter: address of board private structure
- * @rx_desc: current Rx descriptor being processed
+ * @status_err: hardware indication of status of receive
  * @skb: skb currently being received and modified
  **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
-                                     union ixgbe_adv_rx_desc *rx_desc,
-                                     struct sk_buff *skb)
+                                     u32 status_err, struct sk_buff *skb)
 {
-       u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Rx csum disabled */
@@ -576,19 +520,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
        adapter->hw_csum_rx_good++;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                        struct ixgbe_ring *rx_ring, u32 val)
-{
-       /*
-        * Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-       writel(val, hw->hw_addr + rx_ring->tail);
-}
-
 /**
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -605,6 +536,11 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+           (!rx_ring->active))
+               return;
+#endif
 
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -630,28 +566,50 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                }
 
                if (!bi->skb) {
-                       struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
-                                                              bufsz);
+                       struct sk_buff *skb;
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+                       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+                           rx_ring->queue_index) {
+                               skb = vmq_alloc_skb(adapter->netdev,
+                                                   rx_ring->queue_index,
+                                                   bufsz);
+                               if (!skb) {
+                                       adapter->alloc_rx_buff_failed++;
+                                       goto no_buffers;
+                               }
+                               bi->skb = skb;
+                               bi->dma = pci_map_page(pdev,
+                                               skb_shinfo(skb)->frags[0].page,
+                                               skb_shinfo(skb)->frags[0].page_offset,
+                                               skb_shinfo(skb)->frags[0].size,
+                                               PCI_DMA_FROMDEVICE);
+                       } else {
+#endif
+                               skb = netdev_alloc_skb(adapter->netdev, bufsz);
 
-                       if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
-                               goto no_buffers;
-                       }
+                               if (!skb) {
+                                       adapter->alloc_rx_buff_failed++;
+                                       goto no_buffers;
+                               }
 
-                       /*
-                        * Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
+                               skb->dev = adapter->netdev;
+
+                               /*
+                                * Make buffer alignment 2 beyond a 16
+                                * byte boundary this will result in a
+                                * 16 byte aligned IP header after the
+                                * 14 byte MAC header is removed
+                                */
+                               skb_reserve(skb, NET_IP_ALIGN);
 
-                       bi->skb = skb;
+                               bi->skb = skb;
+                               bi->dma = pci_map_single(pdev, skb->data, bufsz,
+                                                        PCI_DMA_FROMDEVICE);
+                       }
+#ifdef CONFIG_XEN_NETDEV2_VMQ
                }
+#endif
 
-               if (!bi->dma)
-                       bi->dma = pci_map_single(pdev, bi->skb->data, rx_ring->rx_buf_len,
-                                                PCI_DMA_FROMDEVICE);
-               
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -673,7 +631,14 @@ no_buffers:
                if (i-- == 0)
                        i = (rx_ring->count - 1);
 
-               ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+               /*
+                * Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64).
+                */
+               wmb();
+               writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
 }
 
@@ -682,78 +647,39 @@ static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
 }
 
-#if !defined(IXGBE_NO_LRO) || !defined(IXGBE_NO_HW_RSC)
-/**
- * ixgbe_transform_rsc_queue - change rsc queue into a full packet
- * @skb: pointer to the last skb in the rsc queue
- *
- * This function changes a queue full of hw rsc buffers into a completed
- * packet.  It uses the ->prev pointers to find the first packet and then
- * turns it into the frag list owner.
- **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 {
-       unsigned int frag_list_size = 0;
-
-       while (skb->prev) {
-               struct sk_buff *prev = skb->prev;
-               frag_list_size += skb->len;
-               skb->prev = NULL;
-               skb = prev;
-       }
-
-       skb_shinfo(skb)->frag_list = skb->next;
-       skb->next = NULL;
-       skb->len += frag_list_size;
-       skb->data_len += frag_list_size;
-       skb->truesize += frag_list_size;
-       return skb;
+       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 }
 
-#endif /* !IXGBE_NO_LRO || !IXGBE_NO_HW_RSC */
 #ifndef IXGBE_NO_LRO
-/**
- * ixgbe_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
- * @adapter: board private structure
- * @rx_desc: pointer to the rx descriptor
- *
- **/
-static inline bool ixgbe_can_lro(struct ixgbe_adapter *adapter,
-                                 union ixgbe_adv_rx_desc *rx_desc)
-{
-       u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-
-       return (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED) &&
-               !(adapter->netdev->flags & IFF_PROMISC) &&
-               (pkt_info & IXGBE_RXDADV_PKTTYPE_IPV4) &&
-               (pkt_info & IXGBE_RXDADV_PKTTYPE_TCP);
-}
+static int lromax = 44;
 
 /**
- * ixgbe_lro_flush - Indicate packets to upper layer.
+ * ixgbe_lro_ring_flush - Indicate packets to upper layer.
  *
  * Update IP and TCP header part of head skb if more than one
  * skb's chained and indicate packets to upper layer.
  **/
-static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector,
-                                 struct ixgbe_lro_desc *lrod)
+static void ixgbe_lro_ring_flush(struct ixgbe_lro_list *lrolist,
+                                 struct ixgbe_adapter *adapter,
+                                 struct ixgbe_lro_desc *lrod, u8 status,
+                                 struct ixgbe_ring *rx_ring,
+                                 union ixgbe_adv_rx_desc *rx_desc)
 {
-       struct ixgbe_lro_list *lrolist = q_vector->lrolist;
        struct iphdr *iph;
        struct tcphdr *th;
        struct sk_buff *skb;
        u32 *ts_ptr;
+       struct ixgbe_lro_info *lro_data = &adapter->lro_data;
+       struct net_device *netdev = adapter->netdev;
 
        hlist_del(&lrod->lro_node);
        lrolist->active_cnt--;
 
        skb = lrod->skb;
-       lrod->skb = NULL;
 
        if (lrod->append_cnt) {
-               /* take the lro queue and convert to skb format */
-               skb = ixgbe_transform_rsc_queue(skb);
-
                /* incorporate ip header and re-calculate checksum */
                iph = (struct iphdr *)skb->data;
                iph->tot_len = ntohs(skb->len);
@@ -763,12 +689,10 @@ static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector,
                /* incorporate the latest ack into the tcp header */
                th = (struct tcphdr *) ((char *)skb->data + sizeof(*iph));
                th->ack_seq = lrod->ack_seq;
-               th->psh = lrod->psh;
                th->window = lrod->window;
-               th->check = 0;
 
                /* incorporate latest timestamp into the tcp header */
-               if (lrod->opt_bytes) {
+               if (lrod->timestamp) {
                        ts_ptr = (u32 *)(th + 1);
                        ts_ptr[1] = htonl(lrod->tsval);
                        ts_ptr[2] = lrod->tsecr;
@@ -778,27 +702,38 @@ static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector,
 #ifdef NETIF_F_TSO
        skb_shinfo(skb)->gso_size = lrod->mss;
 #endif
-       ixgbe_receive_skb(q_vector, skb, lrod->vlan_tag);
-       lrolist->stats.flushed++;
+       ixgbe_receive_skb(adapter, skb, status, rx_ring, rx_desc);
 
-       
+       netdev->last_rx = jiffies;
+       lro_data->stats.coal += lrod->append_cnt + 1;
+       lro_data->stats.flushed++;
+
+       lrod->skb = NULL;
+       lrod->last_skb = NULL;
+       lrod->timestamp = 0;
+       lrod->append_cnt = 0;
+       lrod->data_size = 0;
        hlist_add_head(&lrod->lro_node, &lrolist->free);
 }
 
-static void ixgbe_lro_flush_all(struct ixgbe_q_vector *q_vector)
+static void ixgbe_lro_ring_flush_all(struct ixgbe_lro_list *lrolist,
+                                     struct ixgbe_adapter *adapter, u8 status,
+                                     struct ixgbe_ring *rx_ring,
+                                     union ixgbe_adv_rx_desc *rx_desc)
 {
        struct ixgbe_lro_desc *lrod;
        struct hlist_node *node, *node2;
-       struct ixgbe_lro_list *lrolist = q_vector->lrolist;
 
        hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, lro_node)
-               ixgbe_lro_flush(q_vector, lrod);
+               ixgbe_lro_ring_flush(lrolist, adapter, lrod, status, rx_ring,
+                                    rx_desc);
 }
 
 /*
  * ixgbe_lro_header_ok - Main LRO function.
  **/
-static u16 ixgbe_lro_header_ok(struct sk_buff *new_skb, struct iphdr *iph,
+static int ixgbe_lro_header_ok(struct ixgbe_lro_info *lro_data,
+                               struct sk_buff *new_skb, struct iphdr *iph,
                                struct tcphdr *th)
 {
        int opt_bytes, tcp_data_len;
@@ -843,135 +778,154 @@ static u16 ixgbe_lro_header_ok(struct sk_buff *new_skb, struct iphdr *iph,
 
        tcp_data_len = ntohs(iph->tot_len) - (th->doff << 2) - sizeof(*iph);
 
+       if (tcp_data_len == 0)
+               return -1;
+
        return tcp_data_len;
 }
 
 /**
- * ixgbe_lro_queue - if able, queue skb into lro chain
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_lro_ring_queue - if able, queue skb into lro chain
+ * @lrolist: pointer to structure for lro entries
+ * @adapter: address of board private structure
  * @new_skb: pointer to current skb being checked
- * @tag: vlan tag for skb
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
  *
  * Checks whether the skb given is eligible for LRO and if that's
  * fine chains it to the existing lro_skb based on flowid. If an LRO for
  * the flow doesn't exist create one.
  **/
-static struct sk_buff *ixgbe_lro_queue(struct ixgbe_q_vector *q_vector,
-                                       struct sk_buff *new_skb, 
-                                      u16 tag)
+static int ixgbe_lro_ring_queue(struct ixgbe_lro_list *lrolist,
+                                struct ixgbe_adapter *adapter,
+                                struct sk_buff *new_skb, u8 status,
+                                struct ixgbe_ring *rx_ring,
+                                union ixgbe_adv_rx_desc *rx_desc)
 {
+       struct ethhdr *eh;
+       struct iphdr *iph;
+       struct tcphdr *th, *header_th;
+       int  opt_bytes, header_ok = 1;
+       u32 *ts_ptr = NULL;
        struct sk_buff *lro_skb;
        struct ixgbe_lro_desc *lrod;
        struct hlist_node *node;
-       struct skb_shared_info *new_skb_info = skb_shinfo(new_skb);
-       struct ixgbe_lro_list *lrolist = q_vector->lrolist;
-       struct iphdr *iph = (struct iphdr *)new_skb->data;
-       struct tcphdr *th = (struct tcphdr *)(iph + 1);
-       int tcp_data_len = ixgbe_lro_header_ok(new_skb, iph, th);
-       u16  opt_bytes = (th->doff << 2) - sizeof(*th);
-       u32 *ts_ptr = (opt_bytes ? (u32 *)(th + 1) : NULL);
-       u32 seq = ntohl(th->seq);
+       u32 seq;
+       struct ixgbe_lro_info *lro_data = &adapter->lro_data;
+       int tcp_data_len;
+       u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+       /* Disable LRO when in promiscuous mode, useful for debugging LRO */
+       if (adapter->netdev->flags & IFF_PROMISC)
+               return -1;
+
+       eh = (struct ethhdr *)skb_mac_header(new_skb);
+       iph = (struct iphdr *)(eh + 1);
+
+       /* check to see if it is IPv4/TCP */
+       if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
+            (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
+               return -1;
+
+       /* find the TCP header */
+       th = (struct tcphdr *) (iph + 1);
+
+       tcp_data_len = ixgbe_lro_header_ok(lro_data, new_skb, iph, th);
+       if (tcp_data_len == -1)
+               header_ok = 0;
 
+       /* make sure any packet we are about to chain doesn't include any pad */
+       skb_trim(new_skb, ntohs(iph->tot_len));
+
+       opt_bytes = (th->doff << 2) - sizeof(*th);
+       if (opt_bytes != 0)
+               ts_ptr = (u32 *)(th + 1);
+
+       seq = ntohl(th->seq);
        /*
         * we have a packet that might be eligible for LRO,
         * so see if it matches anything we might expect
         */
        hlist_for_each_entry(lrod, node, &lrolist->active, lro_node) {
-               if (lrod->source_port != th->source ||
-                       lrod->dest_port != th->dest ||
-                       lrod->source_ip != iph->saddr ||
-                       lrod->dest_ip != iph->daddr ||
-                       lrod->vlan_tag != tag)
-                       continue;
-
-               /* malformed header, or resultant packet would be too large */
-               if (tcp_data_len < 0 || (tcp_data_len + lrod->len) > 65535) {
-                       ixgbe_lro_flush(q_vector, lrod);
-                       break;
-               }
-
-               /* out of order packet */
-               if (seq != lrod->next_seq) {
-                       ixgbe_lro_flush(q_vector, lrod);
-                       tcp_data_len = -1;
-                       break;
-               }
+               if (lrod->source_port == th->source &&
+                       lrod->dest_port == th->dest &&
+                       lrod->source_ip == iph->saddr &&
+                       lrod->dest_ip == iph->daddr &&
+                       lrod->vlan_tag == tag) {
+
+                       if (!header_ok) {
+                               ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+                                                    status, rx_ring, rx_desc);
+                               return -1;
+                       }
 
-               if (lrod->opt_bytes || opt_bytes) {
-                       u32 tsval = ntohl(*(ts_ptr + 1));
-                       /* make sure timestamp values are increasing */
-                       if (opt_bytes != lrod->opt_bytes || 
-                           lrod->tsval > tsval || *(ts_ptr + 2) == 0) {
-                               ixgbe_lro_flush(q_vector, lrod);
-                               tcp_data_len = -1;
-                               break;
+                       if (seq != lrod->next_seq) {
+                               /* out of order packet */
+                               ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+                                                    status, rx_ring, rx_desc);
+                               return -1;
                        }
-                               
-                       lrod->tsval = tsval;
-                       lrod->tsecr = *(ts_ptr + 2);
-               }
 
-               /* remove any padding from the end of the skb */
-               __pskb_trim(new_skb, ntohs(iph->tot_len));
-               /* Remove IP and TCP header*/
-               skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len);
+                       if (lrod->timestamp) {
+                               u32 tsval = ntohl(*(ts_ptr + 1));
+                               /* make sure timestamp values are increasing */
+                               if (lrod->tsval > tsval || *(ts_ptr + 2) == 0) {
+                                       ixgbe_lro_ring_flush(lrolist, adapter,
+                                                            lrod, status,
+                                                            rx_ring, rx_desc);
+                                       return -1;
+                               }
+                               lrod->tsval = tsval;
+                               lrod->tsecr = *(ts_ptr + 2);
+                       }
 
-               lrod->next_seq += tcp_data_len;
-               lrod->ack_seq = th->ack_seq;
-               lrod->window = th->window;
-               lrod->len += tcp_data_len;
-               lrod->psh |= th->psh;
-               lrod->append_cnt++;
-               lrolist->stats.coal++;
+                       lro_skb = lrod->skb;
 
-               if (tcp_data_len > lrod->mss)
-                       lrod->mss = tcp_data_len;
+                       lro_skb->len += tcp_data_len;
+                       lro_skb->data_len += tcp_data_len;
+                       lro_skb->truesize += tcp_data_len;
 
-               lro_skb = lrod->skb;
+                       lrod->next_seq += tcp_data_len;
+                       lrod->ack_seq = th->ack_seq;
+                       lrod->window = th->window;
+                       lrod->data_size += tcp_data_len;
+                       if (tcp_data_len > lrod->mss)
+                               lrod->mss = tcp_data_len;
 
-               /* if header is empty pull pages into current skb */
-               if (!skb_headlen(new_skb) &&
-                   ((skb_shinfo(lro_skb)->nr_frags +
-                     skb_shinfo(new_skb)->nr_frags) <= MAX_SKB_FRAGS )) {
-                       struct skb_shared_info *lro_skb_info = skb_shinfo(lro_skb);
+                       /* Remove IP and TCP header*/
+                       skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len);
 
-                       /* copy frags into the last skb */
-                       memcpy(lro_skb_info->frags + lro_skb_info->nr_frags,
-                              new_skb_info->frags,
-                              new_skb_info->nr_frags * sizeof(skb_frag_t));
+                       /* Chain this new skb in frag_list */
+                       if (skb_shinfo(lro_skb)->frag_list != NULL )
+                               lrod->last_skb->next = new_skb;
+                       else
+                               skb_shinfo(lro_skb)->frag_list = new_skb;
 
-                       lro_skb_info->nr_frags += new_skb_info->nr_frags;
-                       lro_skb->len += tcp_data_len;
-                       lro_skb->data_len += tcp_data_len;
-                       lro_skb->truesize += tcp_data_len;
+                       lrod->last_skb = new_skb ;
 
-                       new_skb_info->nr_frags = 0;
-                       new_skb->truesize -= tcp_data_len;
-                       new_skb->len = new_skb->data_len = 0;
-               } else if (tcp_data_len) {
-               /* Chain this new skb in frag_list */
-                       new_skb->prev = lro_skb;
-                       lro_skb->next = new_skb;
-                       lrod->skb = new_skb ;
-               }
+                       lrod->append_cnt++;
 
-               if (lrod->psh)
-                       ixgbe_lro_flush(q_vector, lrod);
+                       /* New packet with push flag, flush the whole packet. */
+                       if (th->psh) {
+                               header_th =
+                               (struct tcphdr *)(lro_skb->data + sizeof(*iph));
+                               header_th->psh |= th->psh;
+                               ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+                                                    status, rx_ring, rx_desc);
+                               return 0;
+                       }
 
-               /* return the skb if it is empty for recycling */
-               if (!new_skb->len) {
-                       new_skb->data = skb_mac_header(new_skb);
-                       __pskb_trim(new_skb, 0);
-                       new_skb->protocol = 0;
-                       lrolist->stats.recycled++;
-                       return new_skb;
-               }
+                       if (lrod->append_cnt >= lro_data->max)
+                               ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+                                                    status, rx_ring, rx_desc);
 
-               return NULL;
+                       return 0;
+               } /*End of if*/
        }
 
        /* start a new packet */
-       if (tcp_data_len > 0 && !hlist_empty(&lrolist->free) && !th->psh) {
+       if (header_ok && !hlist_empty(&lrolist->free)) {
                lrod = hlist_entry(lrolist->free.first, struct ixgbe_lro_desc,
                                   lro_node);
 
@@ -980,18 +934,16 @@ static struct sk_buff *ixgbe_lro_queue(struct ixgbe_q_vector *q_vector,
                lrod->dest_ip = iph->daddr;
                lrod->source_port = th->source;
                lrod->dest_port = th->dest;
-               lrod->vlan_tag = tag;
-               lrod->len = new_skb->len;
                lrod->next_seq = seq + tcp_data_len;
+               lrod->mss = tcp_data_len;
                lrod->ack_seq = th->ack_seq;
                lrod->window = th->window;
-               lrod->mss = tcp_data_len;
-               lrod->opt_bytes = opt_bytes;
-               lrod->psh = 0;
-               lrod->append_cnt = 0;
+               lrod->data_size = tcp_data_len;
+               lrod->vlan_tag = tag;
 
                /* record timestamp if it is present */
                if (opt_bytes) {
+                       lrod->timestamp = 1;
                        lrod->tsval = ntohl(*(ts_ptr + 1));
                        lrod->tsecr = *(ts_ptr + 2);
                }
@@ -1000,13 +952,11 @@ static struct sk_buff *ixgbe_lro_queue(struct ixgbe_q_vector *q_vector,
                /* .. and insert at the front of the active list */
                hlist_add_head(&lrod->lro_node, &lrolist->active);
                lrolist->active_cnt++;
-               lrolist->stats.coal++;
-               return NULL;
+
+               return 0;
        }
 
-       /* packet not handled by any of the above, pass it to the stack */
-       ixgbe_receive_skb(q_vector, new_skb, tag);
-       return NULL;
+       return -1;
 }
 
 static void ixgbe_lro_ring_exit(struct ixgbe_lro_list *lrolist)
@@ -1027,7 +977,8 @@ static void ixgbe_lro_ring_exit(struct ixgbe_lro_list *lrolist)
        }
 }
 
-static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist)
+static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist,
+                                struct ixgbe_adapter *adapter)
 {
        int j, bytes;
        struct ixgbe_lro_desc *lrod;
@@ -1042,62 +993,30 @@ static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist)
                if (lrod != NULL) {
                        INIT_HLIST_NODE(&lrod->lro_node);
                        hlist_add_head(&lrod->lro_node, &lrolist->free);
+               } else {
+                       DPRINTK(PROBE, ERR,
+                               "Allocation for LRO descriptor %u failed\n", j);
                }
        }
 }
 
 #endif /* IXGBE_NO_LRO */
-
-#ifndef IXGBE_NO_HW_RSC
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-               IXGBE_RXDADV_RSCCNT_MASK) >>
-               IXGBE_RXDADV_RSCCNT_SHIFT;
-}
-
-#endif /* IXGBE_NO_HW_RSC */
-
-static void ixgbe_rx_status_indication(u32 staterr,
-                                       struct ixgbe_adapter *adapter)
-{
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82599EB:
-               if (staterr & IXGBE_RXD_STAT_FLM)
-                       adapter->flm++;
-#ifndef IXGBE_NO_LLI
-               if (staterr & IXGBE_RXD_STAT_DYNINT)
-                       adapter->lli_int++;
-#endif /* IXGBE_NO_LLI */
-               break;
-       case ixgbe_mac_82598EB:
-#ifndef IXGBE_NO_LLI
-               if (staterr & IXGBE_RXD_STAT_DYNINT)
-                       adapter->lli_int++;
-#endif /* IXGBE_NO_LLI */
-               break;
-       default:
-               break;
-       }
-}
-
 #ifdef CONFIG_IXGBE_NAPI
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
 #else
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
 #endif
 {
-       struct ixgbe_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i, rsc_count = 0;
+       unsigned int i;
        u32 len, staterr;
-       u16 hdr_info, vlan_tag;
+       u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
 #ifndef CONFIG_IXGBE_NAPI
@@ -1121,23 +1040,44 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+                       if (hdr_info & IXGBE_RXDADV_SPH)
+                               adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }
+
+#ifndef IXGBE_NO_LLI
+               if (staterr & IXGBE_RXD_STAT_DYNINT)
+                       adapter->lli_int++;
+#endif
+
                cleaned = true;
                skb = rx_buffer_info->skb;
-               prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;
-
-               /* if this is a skb from previous receive dma will be 0 */
-               if (rx_buffer_info->dma) {
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+               if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+                    rx_ring->queue_index) {
+                       /* for Xen VMDq, packet data goes in first page of
+                        * skb, instead of data.
+                        */
+                       /* TODO this is broke for jumbos > 4k */
+                       pci_unmap_page(pdev, rx_buffer_info->dma,
+                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       skb->len += len;
+                       skb_shinfo(skb)->frags[0].size = len;
+               } else {
+                       prefetch(skb->data - NET_IP_ALIGN);
+               }
+#else
+               prefetch(skb->data - NET_IP_ALIGN);
+#endif
+               if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
-                                        rx_ring->rx_buf_len,
+                                        rx_ring->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
-                       rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }
 
@@ -1150,7 +1090,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                           rx_buffer_info->page_offset,
                                           upper_len);
 
-                       if (page_count(rx_buffer_info->page) != 1)
+                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+                           (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);
@@ -1163,72 +1104,60 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                i++;
                if (i == rx_ring->count)
                        i = 0;
+               next_buffer = &rx_ring->rx_buffer_info[i];
 
                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
-               cleaned_count++;
-
-#ifndef IXGBE_NO_HW_RSC
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-                       rsc_count = ixgbe_get_rsc_count(rx_desc);
-
-#endif
-               if (rsc_count) {
-                       u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
-                                    IXGBE_RXDADV_NEXTP_SHIFT;
-                       next_buffer = &rx_ring->rx_buffer_info[nextp];
-                       rx_ring->rsc_count += (rsc_count - 1);
-               } else {
-                       next_buffer = &rx_ring->rx_buffer_info[i];
-               }
 
+               cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
-                       ixgbe_rx_status_indication(staterr, adapter);
-#ifndef IXGBE_NO_HW_RSC
-                       if (skb->prev)
-                               skb = ixgbe_transform_rsc_queue(skb);
-#endif
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
-                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                               rx_buffer_info->skb = next_buffer->skb;
-                               rx_buffer_info->dma = next_buffer->dma;
-                               next_buffer->skb = skb;
-                               next_buffer->dma = 0;
-                       } else {
-                               skb->next = next_buffer->skb;
-                               skb->next->prev = skb;
-                       }
+                       rx_buffer_info->skb = next_buffer->skb;
+                       rx_buffer_info->dma = next_buffer->dma;
+                       next_buffer->skb = skb;
+                       next_buffer->dma = 0;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
-                       /* trim packet back to size 0 and recycle it */
-                       __pskb_trim(skb, 0);
-                       rx_buffer_info->skb = skb;
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+                       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+                            rx_ring->queue_index)
+                               vmq_free_skb(skb, rx_ring->queue_index);
+                       else
+#endif
+                               dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
 
-               ixgbe_rx_checksum(adapter, rx_desc, skb);
+               ixgbe_rx_checksum(adapter, staterr, skb);
 
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;
-
-               skb->protocol = eth_type_trans(skb, adapter->netdev);
-               vlan_tag = ((staterr & IXGBE_RXD_STAT_VP) ?
-                           le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+               if (!((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+                    rx_ring->queue_index))
+#endif
+                       skb->protocol = eth_type_trans(skb, adapter->netdev);
 
 #ifndef IXGBE_NO_LRO
-               if (ixgbe_can_lro(adapter, rx_desc))
-                       rx_buffer_info->skb = ixgbe_lro_queue(q_vector, skb, vlan_tag);
-               else
+               if (ixgbe_lro_ring_queue(rx_ring->lrolist,
+                               adapter, skb, staterr, rx_ring, rx_desc) == 0) {
+                       adapter->netdev->last_rx = jiffies;
+                       rx_ring->stats.packets++;
+                       if (upper_len)
+                               rx_ring->stats.bytes += upper_len;
+                       else
+                               rx_ring->stats.bytes += skb->len;
+                       goto next_desc;
+               }
 #endif
-                       ixgbe_receive_skb(q_vector, skb, vlan_tag);
-
+               ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
                adapter->netdev->last_rx = jiffies;
 
 next_desc:
@@ -1242,17 +1171,23 @@ next_desc:
 
                /* use prefetched values */
                rx_desc = next_rxd;
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+               rx_buffer_info = next_buffer;
 
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
+       rx_ring->next_to_clean = i;
 #ifndef IXGBE_NO_LRO
-               if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED)
-                       ixgbe_lro_flush_all(q_vector);
+       ixgbe_lro_ring_flush_all(rx_ring->lrolist, adapter,
+                            staterr, rx_ring, rx_desc);
 #endif /* IXGBE_NO_LRO */
-       rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+#ifndef IXGBE_NO_INET_LRO
+       if (rx_ring->lro_used) {
+               lro_flush_all(&rx_ring->lro_mgr);
+               rx_ring->lro_used = false;
+       }
+#endif
 
        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -1264,41 +1199,12 @@ next_desc:
 
 #ifndef CONFIG_IXGBE_NAPI
        /* re-arm the interrupt if we had to bail early and have more work */
-       if ((*work_done >= work_to_do) &&
-           (!test_bit(__IXGBE_DOWN, &adapter->state)))
-               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
+       if (*work_done >= work_to_do)
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rx_ring->v_idx);
 #endif
        return cleaned;
 }
 
-/**
- * ixgbe_write_eitr - write EITR register in hardware specific way
- * @q_vector: structure containing interrupt and ring information
- *
- * This function is made to be called by ethtool and by the driver
- * when it needs to update EITR registers at runtime.  Hardware
- * specific quirks/differences are taken care of here.
- */
-void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
-{
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_hw *hw = &adapter->hw;
-       int v_idx = q_vector->v_idx;
-       u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
-
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               /* must write high and low 16 bits to reset counter */
-               itr_reg |= (itr_reg << 16);
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               /*
-                * set the WDIS bit to not clear the timer bits and cause an
-                * immediate assertion of the interrupt
-                */
-               itr_reg |= IXGBE_EITR_CNT_WDIS;
-       }
-       IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
-}
-
 #ifdef CONFIG_IXGBE_NAPI
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
 #endif
@@ -1317,19 +1223,18 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-       /*
-        * Populate the IVAR table and set the ITR values to the
+       /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-               q_vector = adapter->q_vector[v_idx];
+               q_vector = &adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);
 
                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
-                       ixgbe_set_ivar(adapter, 0, j, v_idx);
+                       ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
@@ -1339,7 +1244,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
-                       ixgbe_set_ivar(adapter, 1, j, v_idx);
+                       ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
@@ -1348,22 +1253,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
-               else if (q_vector->rxr_count)
+               else
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;
 
-               ixgbe_write_eitr(q_vector);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
+                               EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-               ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-                              v_idx);
-       else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-               ixgbe_set_ivar(adapter, -1, 1, v_idx);
+       ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 #ifdef IXGBE_TCP_TIMER
-       ixgbe_set_ivar(adapter, -1, 0, ++v_idx);
-#endif /* IXGBE_TCP_TIMER */
+       ixgbe_set_ivar(adapter, IXGBE_IVAR_TCP_TIMER_INDEX, ++v_idx);
+#endif
 
        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
@@ -1445,10 +1347,12 @@ update_itr_done:
 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 new_itr;
        u8 current_itr, ret_itr;
-       int i, r_idx;
-       struct ixgbe_ring *rx_ring = NULL, *tx_ring = NULL;
+       int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
+                             sizeof(struct ixgbe_q_vector);
+       struct ixgbe_ring *rx_ring, *tx_ring;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
@@ -1497,14 +1401,14 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
        }
 
        if (new_itr != q_vector->eitr) {
-
+               u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-
-               /* save the algorithm value here */
                q_vector->eitr = new_itr;
-
-               ixgbe_write_eitr(q_vector);
+               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+               /* must write high and low 16 bits to reset counter */
+               DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, itr_reg);
+               IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
        }
 
        return;
@@ -1522,26 +1426,6 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
        }
 }
 
-static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-
-       if (eicr & IXGBE_EICR_GPI_SDP1) {
-               /* Clear the interrupt */
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       schedule_work(&adapter->multispeed_fiber_task);
-       } else if (eicr & IXGBE_EICR_GPI_SDP2) {
-               /* Clear the interrupt */
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       schedule_work(&adapter->sfp_config_module_task);
-       } else {
-               /* Interrupt isn't for us... */
-               return;
-       }
-}
-
 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1560,50 +1444,15 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 eicr;
-
-       /*
-        * Workaround of Silicon errata on 82598.  Use clear-by-write
-        * instead of clear-by-read to clear EICR , reading EICS gives the
-        * value of EICR without read-clear of EICR
-        */
-       eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
-       IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
+       u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               if (eicr & IXGBE_EICR_ECC) {
-                       DPRINTK(LINK, INFO, "Received unrecoverable ECC Err, "
-                                           "please reboot\n");
-                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
-               }
-               /* Handle Flow Director Full threshold interrupt */
-               if (eicr & IXGBE_EICR_FLOW_DIR) {
-                       int i;
-                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
-                       /* Disable transmits before FDIR Re-initialization */
-                       netif_tx_stop_all_queues(netdev);
-                       for (i = 0; i < adapter->num_tx_queues; i++) {
-                               struct ixgbe_ring *tx_ring =
-                                                          &adapter->tx_ring[i];
-                               if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-                                                      &tx_ring->reinit_state))
-                                       schedule_work(&adapter->fdir_reinit_task);
-                       }
-               }
-       }
-
        ixgbe_check_fan_failure(adapter, eicr);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
-               ixgbe_check_sfp_event(adapter, eicr);
-
-       /* re-enable the original interrupt state, no lsc, no queues */
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
-                               ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
        return IRQ_HANDLED;
 }
@@ -1638,40 +1487,6 @@ static irqreturn_t ixgbe_msix_tcp_timer(int irq, void *data)
 }
 
 #endif /* IXGBE_TCP_TIMER */
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
-                                          u64 qmask)
-{
-       u32 mask;
-
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       } else {
-               mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
-               mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
-       }
-       /* skip the flush */
-}
-
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
-                                            u64 qmask)
-{
-       u32 mask;
-
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
-       } else {
-               mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
-               mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
-       }
-       /* skip the flush */
-}
-
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
        struct ixgbe_q_vector *q_vector = data;
@@ -1685,28 +1500,21 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
-               tx_ring->total_bytes = 0;
-               tx_ring->total_packets = 0;
-#ifndef CONFIG_IXGBE_NAPI
-               ixgbe_clean_tx_irq(q_vector, tx_ring);
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, tx_ring);
 #endif
-#endif
+               tx_ring->total_bytes = 0;
+               tx_ring->total_packets = 0;
+               ixgbe_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }
 
-#ifdef CONFIG_IXGBE_NAPI
-       /* disable interrupts on this vector only */
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
-       napi_schedule(&q_vector->napi);
-#endif
        /*
         * possibly later we can enable tx auto-adjustment if necessary
         *
-       if (adapter->itr_setting & 1)
+       if (adapter->itr_setting & 3)
                ixgbe_set_itr_msix(q_vector);
         */
 
@@ -1729,10 +1537,12 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
+               if (!rx_ring->active)
+                       continue;
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
 #ifndef CONFIG_IXGBE_NAPI
-               ixgbe_clean_rx_irq(q_vector, rx_ring);
+               ixgbe_clean_rx_irq(adapter, rx_ring);
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
@@ -1743,7 +1553,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
                                      r_idx + 1);
        }
 
-       if (adapter->itr_setting & 1)
+       if (adapter->itr_setting & 3)
                ixgbe_set_itr_msix(q_vector);
 #else
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1753,9 +1563,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
 
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = &(adapter->rx_ring[r_idx]);
+       if (!rx_ring->active)
+               return IRQ_HANDLED;
        /* disable interrupts on this vector only */
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
-       napi_schedule(&q_vector->napi);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+       netif_rx_schedule(adapter->netdev, &q_vector->napi);
 #endif
 
        return IRQ_HANDLED;
@@ -1763,59 +1577,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 
 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 {
-       struct ixgbe_q_vector *q_vector = data;
-       struct ixgbe_adapter  *adapter = q_vector->adapter;
-       struct ixgbe_ring  *ring;
-       int r_idx;
-       int i;
-
-       if (!q_vector->txr_count && !q_vector->rxr_count)
-               return IRQ_HANDLED;
-
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->txr_count; i++) {
-               ring = &(adapter->tx_ring[r_idx]);
-               ring->total_bytes = 0;
-               ring->total_packets = 0;
-#ifndef CONFIG_IXGBE_NAPI
-               ixgbe_clean_tx_irq(q_vector, ring);
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_tx_dca(adapter, ring);
-#endif
-#endif
-               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rxr_count; i++) {
-               ring = &(adapter->rx_ring[r_idx]);
-               ring->total_bytes = 0;
-               ring->total_packets = 0;
-#ifndef CONFIG_IXGBE_NAPI
-               ixgbe_clean_rx_irq(q_vector, ring);
-
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_rx_dca(adapter, ring);
-
-#endif
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
-
-       if (adapter->itr_setting & 1)
-               ixgbe_set_itr_msix(q_vector);
-#else
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
-
-       /* disable interrupts on this vector only */
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
-       napi_schedule(&q_vector->napi);
-#endif
+       ixgbe_msix_clean_rx(irq, data);
+       ixgbe_msix_clean_tx(irq, data);
 
        return IRQ_HANDLED;
 }
@@ -1845,54 +1608,39 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
                ixgbe_update_rx_dca(adapter, rx_ring);
 #endif
 
-       ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+       if (rx_ring->active)
+               ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
 
-#ifndef HAVE_NETDEV_NAPI_LIST
-       if (!netif_running(adapter->netdev))
-               work_done = 0;
-
-#endif
        /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->itr_setting & 1)
+       if ((work_done == 0) || !netif_running(adapter->netdev)) {
+               netif_rx_complete(adapter->netdev, napi);
+               if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+               return 0;
        }
 
        return work_done;
 }
 
 /**
- * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
+ * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
  * This function will clean more than one rx queue associated with a
  * q_vector.
  **/
-static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
+static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
 {
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *ring = NULL;
+       struct ixgbe_ring *rx_ring = NULL;
        int work_done = 0, i;
        long r_idx;
-       bool tx_clean_complete = true;
-
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->txr_count; i++) {
-               ring = &(adapter->tx_ring[r_idx]);
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_tx_dca(adapter, ring);
-#endif
-               tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
-               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
+       u16 enable_mask = 0;
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
@@ -1900,75 +1648,29 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               ring = &(adapter->rx_ring[r_idx]);
+               rx_ring = &(adapter->rx_ring[r_idx]);
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_rx_dca(adapter, ring);
+                       ixgbe_update_rx_dca(adapter, rx_ring);
 #endif
-               ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
+               if (rx_ring->active)
+                       ixgbe_clean_rx_irq(adapter, rx_ring,
+                                          &work_done, budget);
+               enable_mask |= rx_ring->v_idx;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }
 
-       if (!tx_clean_complete)
-               work_done = budget;
-
-#ifndef HAVE_NETDEV_NAPI_LIST
-       if (!netif_running(adapter->netdev))
-               work_done = 0;
-
-#endif
-       /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->itr_setting & 1)
-                       ixgbe_set_itr_msix(q_vector);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
-       }
-
-       return work_done;
-}
-
-/**
- * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
-{
-       struct ixgbe_q_vector *q_vector =
-                              container_of(napi, struct ixgbe_q_vector, napi);
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *tx_ring = NULL;
-       int work_done = 0;
-       long r_idx;
-
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       tx_ring = &(adapter->tx_ring[r_idx]);
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_tx_dca(adapter, tx_ring);
-#endif
-
-       if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
-               work_done = budget;
-
-#ifndef HAVE_NETDEV_NAPI_LIST
-       if (!netif_running(adapter->netdev))
-               work_done = 0;
-
-#endif
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = &(adapter->rx_ring[r_idx]);
        /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->itr_setting & 1)
+       if ((work_done == 0) || !netif_running(adapter->netdev)) {
+               netif_rx_complete(adapter->netdev, napi);
+               if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+               return 0;
        }
 
        return work_done;
@@ -1978,24 +1680,26 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
-       struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+       a->q_vector[v_idx].adapter = a;
+       set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
+       a->q_vector[v_idx].rxr_count++;
+       a->rx_ring[r_idx].v_idx = 1 << v_idx;
 
-       set_bit(r_idx, q_vector->rxr_idx);
-       q_vector->rxr_count++;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                     int t_idx)
+                                     int r_idx)
 {
-       struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
-
-       set_bit(t_idx, q_vector->txr_idx);
-       q_vector->txr_count++;
+       a->q_vector[v_idx].adapter = a;
+       set_bit(r_idx, a->q_vector[v_idx].txr_idx);
+       a->q_vector[v_idx].txr_count++;
+       a->tx_ring[r_idx].v_idx = 1 << v_idx;
 }
 
 /**
  * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
  * @adapter: board private structure to initialize
+ * @vectors: allotted vector count for descriptor rings
  *
  * This function maps descriptor rings to the queue-specific vectors
  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
@@ -2003,9 +1707,8 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * group the rings as "efficiently" as possible.  You would add new
  * mapping configurations in here.
  **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, int vectors)
 {
-       int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
@@ -2018,18 +1721,17 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;
 
-       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
-       if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+       if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);
 
                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
+
                goto out;
        }
 
@@ -2039,16 +1741,16 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
-       for (i = v_start; i < q_vectors; i++) {
-               rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+       for (i = v_start; i < vectors; i++) {
+               rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
-       for (i = v_start; i < q_vectors; i++) {
-               tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+       for (i = v_start; i < vectors; i++) {
+               tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
@@ -2077,31 +1779,30 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)        \
-                                         ? &ixgbe_msix_clean_many : \
-                         (_v)->rxr_count ? &ixgbe_msix_clean_rx   : \
-                         (_v)->txr_count ? &ixgbe_msix_clean_tx   : \
-                         NULL)
+       /* Map the Tx/Rx rings to the vectors we were allotted. */
+       err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+       if (err)
+               goto out;
+
+#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
+                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+                         &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
-               handler = SET_HANDLER(q_vector);
+               handler = SET_HANDLER(&adapter->q_vector[vector]);
 
                if (handler == &ixgbe_msix_clean_rx) {
-                       sprintf(q_vector->name, "%s-%s-%d",
+                       sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbe_msix_clean_tx) {
-                       sprintf(q_vector->name, "%s-%s-%d",
+                       sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "tx", ti++);
-               } else if (handler == &ixgbe_msix_clean_many) {
-                       sprintf(q_vector->name, "%s-%s-%d",
-                               netdev->name, "TxRx", vector);
                } else {
-                       /* skip this unused q_vector */
-                       continue;
+                       sprintf(adapter->name[vector], "%s-%s-%d",
+                               netdev->name, "TxRx", vector);
                }
                err = request_irq(adapter->msix_entries[vector].vector,
-                                 handler, 0, q_vector->name,
-                                 q_vector);
+                                 handler, 0, adapter->name[vector],
+                                 &(adapter->q_vector[vector]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
@@ -2110,9 +1811,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                }
        }
 
-       sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
+       sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+                         &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
@@ -2121,9 +1822,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 #ifdef IXGBE_TCP_TIMER
        vector++;
-       sprintf(adapter->tcp_timer_name, "%s:timer", netdev->name);
+       sprintf(adapter->name[vector], "%s:timer", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &ixgbe_msix_tcp_timer, 0, adapter->tcp_timer_name,
+                         &ixgbe_msix_tcp_timer, 0, adapter->name[vector],
                          netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
@@ -2139,17 +1840,19 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
-                        adapter->q_vector[i]);
+                        &(adapter->q_vector[i]));
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
+out:
        return err;
 }
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_q_vector *q_vector = adapter->q_vector;
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -2182,14 +1885,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
        }
 
        if (new_itr != q_vector->eitr) {
-
+               u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-
-               /* save the algorithm value here */
                q_vector->eitr = new_itr;
-
-               ixgbe_write_eitr(q_vector);
+               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+               /* must write high and low 16 bits to reset counter */
+               IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
        }
 
        return;
@@ -2199,117 +1901,70 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
  * ixgbe_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush)
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
        u32 mask;
-       u64 qmask;
-
-       mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-       qmask = ~0;
-
-       /* don't reenable LSC while waiting for link */
-       if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
-               mask &= ~IXGBE_EIMS_LSC;
+       mask = IXGBE_EIMS_ENABLE_MASK;
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               mask |= IXGBE_EIMS_ECC;
-               mask |= IXGBE_EIMS_GPI_SDP1;
-               mask |= IXGBE_EIMS_GPI_SDP2;
-       }
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-               mask |= IXGBE_EIMS_FLOW_DIR;
-
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       if (queues)
-               ixgbe_irq_enable_queues(adapter, qmask);
-       if (flush)
-               IXGBE_WRITE_FLUSH(&adapter->hw);
+       IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
+
 /**
  * ixgbe_intr - legacy mode Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
  **/
 static irqreturn_t ixgbe_intr(int irq, void *data)
 {
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u32 eicr;
 
-       /*
-        * Workaround of Silicon errata on 82598.  Mask the interrupt
-        * before the read of EICR.
-        */
-       IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
-
        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
         * therefore no explict interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr) {
-               /*
-                * shared interrupt alert!
+#ifdef CONFIG_IXGBE_NAPI
+               /* shared interrupt alert!
                 * make sure interrupts are enabled because the read will
-                * have disabled interrupts due to EIAM
-                * finish the workaround of silicon errata on 82598.  Unmask
-                * the interrupt that we masked before the EICR read.
-                */
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable(adapter, true, true);
+                * have disabled interrupts due to EIAM */
+               ixgbe_irq_enable(adapter);
+#endif
                return IRQ_NONE;  /* Not our interrupt */
        }
 
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               if (eicr & IXGBE_EICR_ECC)
-                       DPRINTK(LINK, INFO, "Received unrecoverable ECC Err, "
-                                           "please reboot\n");
-               ixgbe_check_sfp_event(adapter, eicr);
-       }
-
        ixgbe_check_fan_failure(adapter, eicr);
 
 #ifdef CONFIG_IXGBE_NAPI
-       if (napi_schedule_prep(&(q_vector->napi))) {
+       if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
-               __napi_schedule(&(q_vector->napi));
+               __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
        }
 
-       /*
-        * re-enable link(maybe) and non-queue interrupts, no flush.
-        * ixgbe_poll will re-enable the queue interrupts
-        */
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable(adapter, false, false);
 #else
        adapter->tx_ring[0].total_packets = 0;
        adapter->tx_ring[0].total_bytes = 0;
        adapter->rx_ring[0].total_packets = 0;
        adapter->rx_ring[0].total_bytes = 0;
-       ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
-       ixgbe_clean_rx_irq(q_vector, adapter->rx_ring);
+       ixgbe_clean_rx_irq(adapter, adapter->rx_ring);
+       ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
 
        /* dynamically adjust throttle */
-       if (adapter->itr_setting & 1)
+       if (adapter->itr_setting & 3)
                ixgbe_set_itr(adapter);
 
-       /*
-        * Workaround of Silicon errata on 82598.  Unmask
-        * the interrupt that we masked before the EICR read
-        * no flush of the re-enable is necessary here
-        */
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable(adapter, true, false);
 #endif
        return IRQ_HANDLED;
 }
@@ -2319,12 +1974,11 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        for (i = 0; i < q_vectors; i++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+               struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
-               q_vector->eitr = adapter->eitr_param;
        }
 }
 
@@ -2371,11 +2025,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                i--;
 #endif
                free_irq(adapter->msix_entries[i].vector, netdev);
-               i--;
 
+               i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
-                                adapter->q_vector[i]);
+                                &(adapter->q_vector[i]));
                }
 
                ixgbe_reset_q_vectors(adapter);
@@ -2390,13 +2044,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-       } else {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
-       }
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i;
@@ -2407,6 +2055,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
        }
 }
 
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
+{
+       u32 mask = IXGBE_EIMS_RTX_QUEUE;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+       /* skip the flush */
+}
+
 /**
  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
  *
@@ -2418,8 +2073,8 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
                        EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
 
-       ixgbe_set_ivar(adapter, 0, 0, 0);
-       ixgbe_set_ivar(adapter, 1, 0, 0);
+       ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
+       ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
 
        map_vector_to_rxq(adapter, 0, 0);
        map_vector_to_txq(adapter, 0, 0);
@@ -2435,7 +2090,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
-       u64 tdba;
+       u64 tdba, tdwba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, j, tdlen, txctrl;
 
@@ -2448,6 +2103,11 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                                (tdba & DMA_32BIT_MASK));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+               tdwba = ring->dma +
+                       (ring->count * sizeof(union ixgbe_adv_tx_desc));
+               tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
+               IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
@@ -2460,39 +2120,6 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
        }
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               u32 rttdcs;
-
-               /* disable the arbiter while setting MTQC */
-               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-               rttdcs |= IXGBE_RTTDCS_ARBDIS;
-               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
-
-               /* set transmit pool layout */
-               switch (adapter->flags &
-                       (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED))
-               {
-
-               case (IXGBE_FLAG_VMDQ_ENABLED):
-                       IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-                                       (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
-                       break;
-
-               case (IXGBE_FLAG_DCB_ENABLED):
-                       IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-                                     (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
-                       break;
-
-               default:
-                       IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
-                       break;
-               }
-
-               /* re-eable the arbiter */
-               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
-               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
-       }
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT        2
@@ -2501,41 +2128,24 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 {
        struct ixgbe_ring *rx_ring;
        u32 srrctl;
-       int queue0 = 0;
+       int queue0;
        unsigned long mask;
-       struct ixgbe_ring_feature *feature = adapter->ring_feature;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       int dcb_i = feature[RING_F_DCB].indices;
-                       if (dcb_i == 8)
-                               queue0 = index >> 4;
-                       else if (dcb_i == 4)
-                               queue0 = index >> 5;
-                       else
-                               DPRINTK(PROBE, ERR, "Invalid DCB configuration");
-               } else {
-                       queue0 = index;
-               }
+       /* program one srrctl register per VMDq index */
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+               long shift, len;
+               mask = (unsigned long) adapter->ring_feature[RING_F_VMDQ].mask;
+               len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
+               shift = find_first_bit(&mask, len);
+               queue0 = (index & mask);
+               index = (index & mask) >> shift;
+       /* if VMDq is not active we must program one srrctl register per
+        * RSS queue since we have enabled RDRXCTL.MVMEN
+        */
        } else {
-               /* program one srrctl register per VMDq index */
-               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
-                       long shift, len;
-                       mask = (unsigned long) feature[RING_F_VMDQ].mask;
-                       len = sizeof(feature[RING_F_VMDQ].mask) * 8;
-                       shift = find_first_bit(&mask, len);
-                       queue0 = (index & mask);
-                       index = (index & mask) >> shift;
-               } else {
-                       /*
-                        * if VMDq is not active we must program one srrctl
-                        * register per RSS queue since we have enabled
-                        * RDRXCTL.MVMEN
-                        */
-                       mask = (unsigned long) feature[RING_F_RSS].mask;
-                       queue0 = index & mask;
-                       index = index & mask;
-               }
+               mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+               queue0 = index & mask;
+               index = index & mask;
        }
 
        rx_ring = &adapter->rx_ring[queue0];
@@ -2545,63 +2155,54 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 
-       srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                  IXGBE_SRRCTL_BSIZEHDR_MASK;
-
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
-               srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
-               srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
+               srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               srrctl |= ((IXGBE_RX_HDR_SIZE <<
+                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                          IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
-               srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
-                         IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+               if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+                       srrctl |= IXGBE_RXBUFFER_2048 >>
+                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+               else
+                       srrctl |= rx_ring->rx_buf_len >>
+                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
 }
 
-
-static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+#ifndef IXGBE_NO_INET_LRO
+/**
+ * ixgbe_get_skb_hdr - helper function for LRO header processing
+ * @skb: pointer to sk_buff to be added to LRO packet
+ * @iphdr: pointer to ip header structure
+ * @tcph: pointer to tcp header structure
+ * @hdr_flags: pointer to header flags
+ * @priv: private data
+ **/
+static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
+                             u64 *hdr_flags, void *priv)
 {
-       u32 mrqc = 0;
-       int mask;
-
-       if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
-               return mrqc;
+       union ixgbe_adv_rx_desc *rx_desc = priv;
 
-       mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-                                | IXGBE_FLAG_DCB_ENABLED
-                                | IXGBE_FLAG_VMDQ_ENABLED
-                               );
-
-       switch (mask) {
-       case (IXGBE_FLAG_RSS_ENABLED):
-               mrqc = IXGBE_MRQC_RSSEN;
-               break;
-       case (IXGBE_FLAG_VMDQ_ENABLED):
-               mrqc = IXGBE_MRQC_VMDQEN;
-               break;
-       case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
-               if (adapter->ring_feature[RING_F_RSS].indices == 4)
-                       mrqc = IXGBE_MRQC_VMDQRSS32EN;
-               else if (adapter->ring_feature[RING_F_RSS].indices == 2)
-                       mrqc = IXGBE_MRQC_VMDQRSS64EN;
-               else
-                       mrqc = IXGBE_MRQC_VMDQEN;
-               break;
-       case (IXGBE_FLAG_DCB_ENABLED):
-               mrqc = IXGBE_MRQC_RT8TCEN;
-               break;
-       default:
-               break;
-       }
+       /* Verify that this is a valid IPv4 TCP packet */
+       if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
+            (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
+               return -1;
 
-       return mrqc;
+       /* Set network headers */
+       skb_reset_network_header(skb);
+       skb_set_transport_header(skb, ip_hdrlen(skb));
+       *iphdr = ip_hdr(skb);
+       *tcph = tcp_hdr(skb);
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+       return 0;
 }
 
+#endif /* IXGBE_NO_INET_LRO */
 /**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
@@ -2620,15 +2221,18 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                          0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
                          0x6A3E67EA, 0x14364D17, 0x3BED200D};
        u32 fctrl, hlreg0;
-       u32 reta = 0, mrqc = 0;
+       u32 reta = 0, mrqc;
        u32 vmdctl;
-       int pool;
        u32 rdrxctl;
-#ifndef IXGBE_NO_HW_RSC
-       u32 rscctrl;
-#endif /* IXGBE_NO_HW_RSC */
        int rx_buf_len;
 
+#ifndef IXGBE_NO_LRO
+       adapter->lro_data.max = lromax;
+
+       if (lromax * netdev->mtu > (1 << 16))
+               adapter->lro_data.max = ((1 << 16) / netdev->mtu) - 1;
+
+#endif
        /* Decide whether to use packet split mode or not */
        if (netdev->mtu > ETH_DATA_LEN) {
                if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
@@ -2645,22 +2249,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                rx_buf_len = IXGBE_RX_HDR_SIZE;
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       /* PSRTYPE must be initialized in 82599 */
-                       u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
-                                     IXGBE_PSRTYPE_UDPHDR |
-                                     IXGBE_PSRTYPE_IPV4HDR |
-                                     IXGBE_PSRTYPE_IPV6HDR |
-                                     IXGBE_PSRTYPE_L2HDR;
-                       IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
-               }
        } else {
-#ifndef IXGBE_NO_HW_RSC
-               if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-                   (netdev->mtu <= ETH_DATA_LEN))
-#else
                if (netdev->mtu <= ETH_DATA_LEN)
-#endif /* IXGBE_NO_HW_RSC */
                        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        rx_buf_len = ALIGN(max_frame, 1024);
@@ -2679,37 +2269,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
+       rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
-           (hw->mac.type == ixgbe_mac_82599EB)) {
-               int pool;
-               for (pool = 0; pool < adapter->num_rx_pools; pool++) {
-                       u32 vmolr;
-
-                       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-                               u32 psrtype = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(pool));
-                               psrtype |= (adapter->num_rx_queues_per_pool << 29);
-                               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(pool), psrtype);
-                       }
-
-                       /*
-                        * accept untagged packets until a vlan tag
-                        * is specifically set for the VMDQ queue/pool
-                        */
-                       vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-                       vmolr |= IXGBE_VMOLR_AUPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-               }
-       }
-
-       rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
-       /*
-        * Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring
-        */
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                j = adapter->rx_ring[i].reg_idx;
@@ -2720,51 +2286,60 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_RDH(j);
                adapter->rx_ring[i].tail = IXGBE_RDT(j);
-               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
+#ifndef CONFIG_XEN_NETDEV2_VMQ
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+                       /* Reserve VMDq set 1 for FCoE, using 3k buffers */
+                       if ((i & adapter->ring_feature[RING_F_VMDQ].mask) == 1)
+                               adapter->rx_ring[i].rx_buf_len = 3072;
+                       else
+                               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+               } else {
+                       adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+               }
+#else
+                       adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+#endif /* CONFIG_XEN_NETDEV2_VMQ */
+
+#ifndef IXGBE_NO_INET_LRO
+               /* Initial LRO Settings */
+               adapter->rx_ring[i].lro_mgr.max_aggr = adapter->lro_max_aggr;
+               adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
+               adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
+               adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
+#ifdef CONFIG_IXGBE_NAPI
+               if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+                       adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
+#endif
+               adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
+               adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+               adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+
+#endif
                ixgbe_configure_srrctl(adapter, j);
        }
 
-       if (hw->mac.type == ixgbe_mac_82598EB) {
-               /*
-                * For VMDq support of different descriptor types or
-                * buffer sizes through the use of multiple SRRCTL
-                * registers, RDRXCTL.MVMEN must be set to 1
-                *
-                * also, the manual doesn't mention it clearly but DCA hints
-                * will only use queue 0's tags unless this bit is set.  Side
-                * effects of setting this bit are only that SRRCTL must be
-                * fully programmed [0..15]
-                */
-               rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-               rdrxctl |= IXGBE_RDRXCTL_MVMEN;
-               IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-       }
+       /*
+        * For VMDq support of different descriptor types or
+        * buffer sizes through the use of multiple SRRCTL
+        * registers, RDRXCTL.MVMEN must be set to 1
+        *
+        * also, the manual doesn't mention it clearly but DCA hints
+        * will only use queue 0's tags unless this bit is set.  Side
+        * effects of setting this bit are only that SRRCTL must be
+        * fully programmed [0..15]
+        */
+       rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+       rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 
        if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
-               u32 vt_reg;
-               u32 vt_reg_bits;
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       vt_reg = IXGBE_VT_CTL;
-                       vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
-                                       | IXGBE_VT_CTL_REPLEN;
-               } else {
-                       vt_reg = IXGBE_VMD_CTL;
-                       vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN;
-               }
-               vmdctl = IXGBE_READ_REG(hw, vt_reg);
-               IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits);
                IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
-
-               IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0xFFFFFFFF);
-               IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0xFFFFFFFF);
-               IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0xFFFFFFFF);
-               IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0xFFFFFFFF);
+               vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+               IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL,
+                               vmdctl | IXGBE_VMD_CTL_VMDQ_EN);
        }
 
-       /* Program MRQC for the distribution of queues */
-       mrqc = ixgbe_setup_mrqc(adapter);
-
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                /* Fill out redirection table */
                for (i = 0, j = 0; i < 128; i++, j++) {
@@ -2781,17 +2356,19 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                for (i = 0; i < 10; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 
-               if (hw->mac.type == ixgbe_mac_82598EB)
-                       mrqc |= IXGBE_MRQC_RSSEN;
+               mrqc = IXGBE_MRQC_RSSEN
                    /* Perform hash on these packet types */
-               mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
-                     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-                     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
-                     | IXGBE_MRQC_RSS_FIELD_IPV6
-                     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
-                     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+                      | IXGBE_MRQC_RSS_FIELD_IPV4
+                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+                      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+                      | IXGBE_MRQC_RSS_FIELD_IPV6
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
        }
-       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
@@ -2808,71 +2385,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-#ifndef IXGBE_NO_HW_RSC
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-                       rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
-#endif /* IXGBE_NO_HW_RSC */
-               rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
-               IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-       }
-
-#ifndef IXGBE_NO_HW_RSC
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-               /* Enable 82599 HW RSC */
-               for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i].reg_idx;
-                       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
-                       rscctrl |= IXGBE_RSCCTL_RSCEN;
-                       /*
-                        * we must limit the number of descriptors so that
-                        * the total size of max desc * buf_len is not greater
-                        * than 65535
-                        */
-                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-#if (MAX_SKB_FRAGS > 16)
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#else
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-#endif
-                       } else {
-                               if (rx_buf_len < IXGBE_RXBUFFER_4096)
-                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-                               else if (rx_buf_len < IXGBE_RXBUFFER_8192)
-                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-                               else
-                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-                       }
-
-                       if (adapter->num_rx_queues_per_pool == 1)
-                               pool = j / 2;
-                       else
-                               pool = j / adapter->num_rx_queues_per_pool;
-
-                       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
-                               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(pool),
-                                     (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(pool)) |
-                                      IXGBE_PSRTYPE_TCPHDR));
-
-                       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
-
-               }
-               /* Enable TCP header recognition in PSRTYPE */
-               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
-                       (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
-                        IXGBE_PSRTYPE_TCPHDR));
-
-               /* Disable RSC for ACK packets */
-               IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
-                  (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
-       }
-#endif /* IXGBE_NO_HW_RSC */
 }
 
 #ifdef NETIF_F_HW_VLAN_TX
@@ -2881,55 +2393,43 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;
-       int i, j;
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);
        adapter->vlgrp = grp;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               /* always enable VLAN tag insert/strip */
-               ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-               ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
-               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       /*
+        * For a DCB driver, always enable VLAN tag stripping so we can
+        * still receive traffic from a DCB-enabled host.
+        */
+       ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+       ctrl |= IXGBE_VLNCTRL_VME;
+       ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+
+       if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-               ctrl |= IXGBE_VLNCTRL_VFE;
+               ctrl |= IXGBE_VLNCTRL_VME;
                ctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
-               for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i].reg_idx;
-                       ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
-                       ctrl |= IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
-               }
        }
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable(adapter, true, true);
+               ixgbe_irq_enable(adapter);
 }
 
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       int i;
 #ifndef HAVE_NETDEV_VLAN_FEATURES
        struct net_device *v_netdev;
 #endif /* HAVE_NETDEV_VLAN_FEATURES */
 
        /* add VID to filter table */
-       if (hw->mac.ops.set_vfta) {
+       if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);
-               if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
-                   (adapter->hw.mac.type == ixgbe_mac_82599EB)) {
-                       /* enable vlan id for all pools */
-                       for (i = 1; i < adapter->num_rx_pools; i++)
-                               hw->mac.ops.set_vfta(hw, vid, i, true);
-               }
-       }
 #ifndef HAVE_NETDEV_VLAN_FEATURES
        /*
         * Copy feature flags from netdev to the vlan netdev for this vid.
@@ -2945,11 +2445,6 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       int i;
-
-       /* User is not allowed to remove vlan ID 0 */
-       if (!vid)
-               return;
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);
@@ -2957,29 +2452,16 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable(adapter, true, true);
+               ixgbe_irq_enable(adapter);
        /* remove VID from filter table */
-       if (hw->mac.ops.set_vfta) {
+       if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, false);
-               if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
-                   (adapter->hw.mac.type == ixgbe_mac_82599EB)) {
-                       /* remove vlan id from all pools */
-                       for (i = 1; i < adapter->num_rx_pools; i++)
-                               hw->mac.ops.set_vfta(hw, vid, i, false);
-               }
-       }
 }
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-
        ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
-       /* add vlan ID 0 so we always accept priority-tagged traffic */
-       if (hw->mac.ops.set_vfta)
-               hw->mac.ops.set_vfta(hw, 0, 0, true);
-
        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
@@ -2991,11 +2473,44 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 }
 
 #endif
+#ifndef CONFIG_XEN_NETDEV2_VMQ
+/**
+ * compare_ether_oui - Compare two OUIs
+ * @addr1: pointer to a 6 byte array containing an Ethernet address
+ * @addr2: pointer to a 6 byte array containing an Ethernet address
+ *
+ * Compare the Organizationally Unique Identifiers from two Ethernet addresses,
+ * returns 0 if equal
+ */
+static inline int compare_ether_oui(const u8 *a, const u8 *b)
+{
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+
+/**
+ * is_fcoe_ether_addr - Compare an Ethernet address to FCoE OUI
+ * @addr1: pointer to a 6 byte array containing an Ethernet address
+ * @addr2: pointer to a 6 byte array containing an Ethernet address
+ *
+ * Compare the Organizationally Unique Identifier from an Ethernet addresses
+ * with the well known Fibre Channel over Ethernet OUI
+ *
+ * Returns 1 if the address has an FCoE OUI
+ */
+static inline int is_fcoe_ether_addr(const u8 *addr)
+{
+       static const u8 fcoe_oui[] = { 0x0e, 0xfc, 0x00 };
+       return compare_ether_oui(addr, fcoe_oui) == 0;
+}
+#endif /* CONFIG_XEN_NETDEV2_VMQ */
+
 static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
 {
+#ifndef CONFIG_XEN_NETDEV2_VMQ
+       struct ixgbe_adapter *adapter = hw->back;
+#endif
        struct dev_mc_list *mc_ptr;
        u8 *addr = *mc_addr_ptr;
-
        *vmdq = 0;
 
        mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
@@ -3003,7 +2518,27 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
                *mc_addr_ptr = mc_ptr->next->dmi_addr;
        else
                *mc_addr_ptr = NULL;
-
+#ifndef CONFIG_XEN_NETDEV2_VMQ
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+               /* VMDQ set 1 is used for FCoE */
+               if (adapter->ring_feature[RING_F_VMDQ].indices)
+                       *vmdq = is_fcoe_ether_addr(addr) ? 1 : 0;
+               if (*vmdq == 1) {
+                       u32 hlreg0, mhadd;
+
+                       /* Make sure that jumbo frames are enabled */
+                       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+                       hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+                       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+                       /* set the max frame size to pass receive filtering */
+                       mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+                       mhadd &= IXGBE_MHADD_MFS_MASK;
+                       mhadd |= 3072 << IXGBE_MHADD_MFS_SHIFT;
+                       IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+               }
+       }
+#endif
        return addr;
 }
 
@@ -3042,7 +2577,6 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
                }
                vlnctrl |= IXGBE_VLNCTRL_VFE;
                hw->addr_ctrl.user_set_promisc = 0;
-               fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
@@ -3080,20 +2614,17 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;
-               q_vector = adapter->q_vector[q_idx];
+               q_vector = &adapter->q_vector[q_idx];
+               if (!q_vector->rxr_count)
+                       continue;
                napi = &q_vector->napi;
-               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                       if (!q_vector->rxr_count || !q_vector->txr_count) {
-                               if (q_vector->txr_count == 1)
-                                       napi->poll = &ixgbe_clean_txonly;
-                               else if (q_vector->rxr_count == 1)
-                                       napi->poll = &ixgbe_clean_rxonly;
-                       }
-               }
+               if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
+                   (q_vector->rxr_count > 1))
+                       napi->poll = &ixgbe_clean_rxonly_many;
 
                napi_enable(napi);
        }
-#endif /* CONFIG_IXGBE_NAPI */
+#endif
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
@@ -3108,7 +2639,9 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
                q_vectors = 1;
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               q_vector = adapter->q_vector[q_idx];
+               q_vector = &adapter->q_vector[q_idx];
+               if (!q_vector->rxr_count)
+                       continue;
                napi_disable(&q_vector->napi);
        }
 #endif
@@ -3151,88 +2684,14 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        }
        /* Enable VLAN tag insert/strip */
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       if (hw->mac.type == ixgbe_mac_82598EB) {
-               vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
-               vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-       } else if (hw->mac.type == ixgbe_mac_82599EB) {
-               vlnctrl |= IXGBE_VLNCTRL_VFE;
-               vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-               for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i].reg_idx;
-                       vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
-                       vlnctrl |= IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
-               }
-       }
+       vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+       vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, 0, 0, true);
 }
 
 #ifndef IXGBE_NO_LLI
-static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter)
-{
-       u16 port;
-
-       if (adapter->lli_etype) {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
-                               (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 |
-                                IXGBE_IMIR_CTRL_BP_82599));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0),
-                               (adapter->lli_etype | IXGBE_ETQF_FILTER_EN));
-       }
-
-       if (adapter->lli_port) {
-               port = ntohs((u16)adapter->lli_port);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
-                               (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 |
-                                IXGBE_IMIR_CTRL_BP_82599));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
-                               (IXGBE_FTQF_POOL_MASK_EN |
-                                (IXGBE_FTQF_PRIORITY_MASK <<
-                                 IXGBE_FTQF_PRIORITY_SHIFT) |
-                                (IXGBE_FTQF_DEST_PORT_MASK <<
-                                 IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16));
-       }
-
-       if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
-                               (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 |
-                                IXGBE_IMIR_CTRL_PSH_82599 | IXGBE_IMIR_CTRL_SYN_82599 |
-                                IXGBE_IMIR_CTRL_URG_82599 | IXGBE_IMIR_CTRL_ACK_82599 |
-                                IXGBE_IMIR_CTRL_RST_82599 | IXGBE_IMIR_CTRL_FIN_82599));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
-                               (IXGBE_FTQF_POOL_MASK_EN |
-                                (IXGBE_FTQF_PRIORITY_MASK <<
-                                 IXGBE_FTQF_PRIORITY_SHIFT) |
-                                (IXGBE_FTQF_5TUPLE_MASK_MASK <<
-                                 IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
-
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, 0xfc000000);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100);
-       }
-
-       if (adapter->lli_size) {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
-                               (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_CTRL_BP_82599));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, adapter->lli_size);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
-                               (IXGBE_FTQF_POOL_MASK_EN |
-                                (IXGBE_FTQF_PRIORITY_MASK <<
-                                 IXGBE_FTQF_PRIORITY_SHIFT) |
-                                (IXGBE_FTQF_5TUPLE_MASK_MASK <<
-                                 IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
-       }
-
-       if (adapter->lli_vlan_pri) {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP,
-                               (IXGBE_IMIRVP_PRIORITY_EN | adapter->lli_vlan_pri));
-       }
-}
-
 static void ixgbe_configure_lli(struct ixgbe_adapter *adapter)
 {
        u16 port;
@@ -3270,7 +2729,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        int i;
-       struct ixgbe_hw *hw = &adapter->hw;
 
        ixgbe_set_rx_mode(netdev);
 
@@ -3284,116 +2742,12 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                netif_set_gso_max_size(netdev, 65536);
        }
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-               ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
-       else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-               ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
-
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = &adapter->rx_ring[i];
-               ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
-       }
-}
-
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
-       switch (hw->phy.type) {
-       case ixgbe_phy_sfp_avago:
-       case ixgbe_phy_sfp_ftl:
-       case ixgbe_phy_sfp_intel:
-       case ixgbe_phy_sfp_unknown:
-       case ixgbe_phy_tw_tyco:
-       case ixgbe_phy_tw_unknown:
-               return true;
-       default:
-               return false;
-       }
-}
-
-/**
- * ixgbe_sfp_link_config - set up SFP+ link
- * @adapter: pointer to private adapter struct
- **/
-static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-
-               if (hw->phy.multispeed_fiber) {
-                       /*
-                        * In multispeed fiber setups, the device may not have
-                        * had a physical connection when the driver loaded.
-                        * If that's the case, the initial link configuration
-                        * couldn't get the MAC into 10G or 1G mode, so we'll
-                        * never have a link status change interrupt fire.
-                        * We need to try and force an autonegotiation
-                        * session, then bring up link.
-                        */
-                       hw->mac.ops.setup_sfp(hw);
-                       if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
-                               schedule_work(&adapter->multispeed_fiber_task);
-               } else {
-                       /*
-                        * Direct Attach Cu and non-multispeed fiber modules
-                        * still need to be configured properly prior to
-                        * attempting link.
-                        */
-                       if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
-                               schedule_work(&adapter->sfp_config_module_task);
-               }
-}
-
-/**
- * ixgbe_non_sfp_link_config - set up non-SFP+ link
- * @hw: pointer to private hardware struct
- *
- * Returns 0 on success, negative on failure
- **/
-static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
-{
-       u32 autoneg;
-       bool link_up = false;
-       u32 ret = IXGBE_ERR_LINK_SETUP;
-
-       if (hw->mac.ops.check_link)
-               ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
-
-       if (ret)
-               goto link_cfg_out;
-
-       if (hw->mac.ops.get_link_capabilities)
-               ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
-                                                       &hw->mac.autoneg);
-       if (ret)
-               goto link_cfg_out;
-
-       if (hw->mac.ops.setup_link_speed)
-               ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
-link_cfg_out:
-       return ret;
-}
-
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
-                                             int rxr)
-{
-       int j = adapter->rx_ring[rxr].reg_idx;
-       int k;
-
-       for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-               if (IXGBE_READ_REG(&adapter->hw,
-                                  IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-                       break;
-               else
-                       msleep(1);
-       }
-       if (k >= IXGBE_MAX_RX_DESC_POLL) {
-               DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
-                       "not set within the polling period\n", rxr);
-       }
-       ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-                             (adapter->rx_ring[rxr].count - 1));
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               if (adapter->rx_ring[i].active)
+                       ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
+                                              IXGBE_DESC_UNUSED(&adapter->rx_ring[i]));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -3401,14 +2755,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        int i, j = 0;
-       int num_rx_rings = adapter->num_rx_queues;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       int err;
 #ifdef IXGBE_TCP_TIMER
        u32 tcp_timer;
 #endif
        u32 txdctl, rxdctl, mhadd;
-       u32 dmatxctl;
        u32 gpie;
 
        ixgbe_get_hw_control(adapter);
@@ -3450,20 +2801,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        }
 
 #endif
-       /* Enable fan failure interrupt */
+       /* Enable fan failure interrupt if media type is copper */
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
                gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
                gpie |= IXGBE_SDP1_GPIEN;
                IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
-               gpie |= IXGBE_SDP1_GPIEN;
-               gpie |= IXGBE_SDP2_GPIEN;
-               IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-       }
-
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3477,42 +2821,25 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                /* enable WTHRESH=8 descriptors, to encourage burst writeback */
                txdctl |= (8 << 16);
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
-       }
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               /* DMATXCTL.EN must be set after all Tx queue config is done */
-               dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-               dmatxctl |= IXGBE_DMATXCTL_TE;
-               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
-       }
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
        }
 
-       for (i = 0; i < num_rx_rings; i++) {
+       for (i = 0; i < adapter->num_rx_queues; i++) {
                j = adapter->rx_ring[i].reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                /* enable PTHRESH=32 descriptors (half the internal cache)
                 * and HTHRESH=0 descriptors (to minimize latency on fetch),
                 * this also removes a pesky rx_no_buffer_count increment */
                rxdctl |= 0x0020;
-               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               if (adapter->rx_ring[i].active)
+                       rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
-               if (hw->mac.type == ixgbe_mac_82599EB)
-                       ixgbe_rx_desc_queue_enable(adapter, i);
        }
        /* enable all receives */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
-       else
-               rxdctl |= IXGBE_RXCTRL_RXEN;
-       ixgbe_enable_rx_dma(hw, rxdctl);
+       rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
 
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                ixgbe_configure_msix(adapter);
@@ -3521,51 +2848,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 #ifndef IXGBE_NO_LLI
        /* lli should only be enabled with MSI-X and MSI */
        if (adapter->flags & IXGBE_FLAG_MSI_ENABLED ||
-           adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-               ixgbe_configure_lli_82599(adapter);
-       else
+           adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                ixgbe_configure_lli(adapter);
-       }
-
 #endif
+
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
 
-       /*
-        * For hot-pluggable SFP+ devices, a new SFP+ module may have
-        * arrived before interrupts were enabled.  We need to kick off
-        * the SFP+ module setup first, then try to bring up link.
-        * If we're not hot-pluggable SFP+, we just need to configure link
-        * and bring it up.
-        */
-       err = hw->phy.ops.identify_sfp(hw);
-       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "failed to load because an "
-                       "unsupported SFP+ module type was detected.\n");
-               ixgbe_down(adapter);
-               return err;
-       }
-
-       if (ixgbe_is_sfp(hw)) {
-               ixgbe_sfp_link_config(adapter);
-       } else {
-               err = ixgbe_non_sfp_link_config(hw);
-               if (err)
-                       DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
-       }
+       /* clear any pending interrupts, may auto mask */
+       IXGBE_READ_REG(hw, IXGBE_EICR);
 
-       /* enable transmits */
-       netif_tx_start_all_queues(netdev);
+       ixgbe_irq_enable(adapter);
 
        /* bring the link up in the watchdog, this could race with our first
         * link up interrupt but shouldn't be a problem */
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               set_bit(__IXGBE_FDIR_INIT_DONE,
-                       &(adapter->tx_ring[i].reinit_state));
        return 0;
 }
 
@@ -3581,44 +2880,16 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 
 int ixgbe_up(struct ixgbe_adapter *adapter)
 {
-       int err;
-       struct ixgbe_hw *hw = &adapter->hw;
-
        ixgbe_configure(adapter);
 
-       err = ixgbe_up_complete(adapter);
-
-       /* clear any pending interrupts, may auto mask */
-       IXGBE_READ_REG(hw, IXGBE_EICR);
-       ixgbe_irq_enable(adapter, true, true);
-
-       return err;
+       return ixgbe_up_complete(adapter);
 }
 
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int err;
-
-       err = hw->mac.ops.init_hw(hw);
-       switch (err) {
-       case 0:
-       case IXGBE_ERR_SFP_NOT_PRESENT:
-               break;
-       case IXGBE_ERR_MASTER_REQUESTS_PENDING:
-               DPRINTK(HW, INFO, "master disable timed out\n");
-               break;
-       case IXGBE_ERR_EEPROM_VERSION:
-               /* We are running on a pre-production device, log a warning */
-               DPRINTK(PROBE, INFO, "This device is a pre-production adapter/"
-                       "LOM.  Please be aware there may be issues associated "
-                       "with your hardware.  If you are experiencing problems "
-                       "please contact your Intel or hardware representative "
-                       "who provided you with this hardware.\n");
-               break;
-       default:
-               DPRINTK(PROBE, ERR, "Hardware Error: %d\n", err);
-       }
+       if (hw->mac.ops.init_hw(hw))
+               DPRINTK(PROBE, ERR, "Hardware Error\n");
 
        /* reprogram the RAR[0] in case user changed it. */
        if (hw->mac.ops.set_rar)
@@ -3643,21 +2914,28 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                struct ixgbe_rx_buffer *rx_buffer_info;
 
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
+               if (rx_buffer_info->skb) {
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+                       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+                            rx_ring->queue_index) {
+                               pci_unmap_page(pdev, rx_buffer_info->dma,
+                                              PAGE_SIZE,
+                                              PCI_DMA_FROMDEVICE);
+                               vmq_free_skb(rx_buffer_info->skb,
+                                            rx_ring->queue_index);
+                               rx_buffer_info->dma = 0;
+                       } else
+#endif
+                               dev_kfree_skb(rx_buffer_info->skb);
+                       rx_buffer_info->skb = NULL;
+               }
+
                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
-                                        rx_ring->rx_buf_len,
+                                        rx_ring->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                }
-               if (rx_buffer_info->skb) {
-                       struct sk_buff *skb = rx_buffer_info->skb;
-                       rx_buffer_info->skb = NULL;
-                       do {
-                               struct sk_buff *this = skb;
-                               skb = skb->prev;
-                               dev_kfree_skb(this);
-                       } while (skb);
-               }
                if (!rx_buffer_info->page)
                        continue;
                pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
@@ -3677,10 +2955,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       if (rx_ring->head)
-               writel(0, adapter->hw.hw_addr + rx_ring->head);
-       if (rx_ring->tail)
-               writel(0, adapter->hw.hw_addr + rx_ring->tail);
+       writel(0, adapter->hw.hw_addr + rx_ring->head);
+       writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -3710,10 +2986,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 
-       if (tx_ring->head)
-               writel(0, adapter->hw.hw_addr + tx_ring->head);
-       if (tx_ring->tail)
-               writel(0, adapter->hw.hw_addr + tx_ring->tail);
+       writel(0, adapter->hw.hw_addr + tx_ring->head);
+       writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -3772,9 +3046,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
         * holding */
        while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
                msleep(1);
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-               cancel_work_sync(&adapter->fdir_reinit_task);
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -3783,11 +3054,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }
-       /* Disable the Tx DMA engine on 82599 */
-       if (hw->mac.type == ixgbe_mac_82599EB)
-               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
-                               (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
-                                ~IXGBE_DMATXCTL_TE));
 
        netif_carrier_off(netdev);
 
@@ -3807,7 +3073,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        /* since we reset the hardware DCA settings were cleared */
-       ixgbe_setup_dca(adapter);
+       if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) {
+               if (dca_add_requester(&adapter->pdev->dev) == 0) {
+                       adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+                       /* always use CB2 mode, difference is masked
+                        * in the CB driver */
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+                       ixgbe_setup_dca(adapter);
+               }
+       }
 #endif
 }
 
@@ -3824,7 +3098,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        struct ixgbe_q_vector *q_vector =
                                container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       int tx_clean_complete, work_done = 0;
+       int tx_cleaned, work_done = 0;
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -3833,24 +3107,20 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        }
 #endif
 
-       tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
-       ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+       tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+       ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
 
-       if (!tx_clean_complete)
+       if (tx_cleaned)
                work_done = budget;
 
-#ifndef HAVE_NETDEV_NAPI_LIST
-       if (!netif_running(adapter->netdev))
-               work_done = 0;
-
-#endif
        /* If no Tx and not enough Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->itr_setting & 1)
+       if ((work_done == 0) || !netif_running(adapter->netdev)) {
+               netif_rx_complete(adapter->netdev, napi);
+               if (adapter->itr_setting & 3)
                        ixgbe_set_itr(adapter);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
+                       ixgbe_irq_enable_queues(adapter);
+               return 0;
        }
        return work_done;
 }
@@ -3883,197 +3153,120 @@ static void ixgbe_reset_task(struct work_struct *work)
        ixgbe_reinit_locked(adapter);
 }
 
-
-/**
- * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device
- * @adapter: board private structure to initialize
- *
- * When DCB (Data Center Bridging) is enabled, allocate queues for
- * each traffic class.  If multiqueue isn't availabe, then abort DCB
- * initialization.
- *
- **/
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
-       bool ret = false;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-
-       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-               return ret;
+       int nrq = 1, ntq = 1;
+       int feature_mask = 0, rss_i, rss_m;
+       int dcb_i, dcb_m;
+       int vmdq_i, vmdq_m;
 
+       /* Number of supported queues */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+               dcb_m = 0;
+               vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
+               vmdq_m = 0;
+               rss_i = adapter->ring_feature[RING_F_RSS].indices;
+               rss_m = 0;
+               feature_mask |= IXGBE_FLAG_DCB_ENABLED;
+               feature_mask |= IXGBE_FLAG_VMDQ_ENABLED;
+               feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+
+               switch (adapter->flags & feature_mask) {
+               case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED |
+                     IXGBE_FLAG_VMDQ_ENABLED):
+                       dcb_m = 0x7 << 3;
+                       vmdq_i = min(2, vmdq_i);
+                       vmdq_m = 0x1 << 2;
+                       rss_i = min(4, rss_i);
+                       rss_m = 0x3;
+                       nrq = dcb_i * vmdq_i * rss_i;
+                       ntq = dcb_i * vmdq_i;
+                       break;
+               case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED):
+                       dcb_m = 0x7 << 3;
+                       vmdq_i = min(8, vmdq_i);
+                       vmdq_m = 0x7;
+                       nrq = dcb_i * vmdq_i;
+                       ntq = min(MAX_TX_QUEUES, dcb_i * vmdq_i);
+                       break;
+               case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
+                       dcb_m = 0x7 << 3;
+                       rss_i = min(8, rss_i);
+                       rss_m = 0x7;
+                       nrq = dcb_i * rss_i;
+                       ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
+                       break;
+               case (IXGBE_FLAG_DCB_ENABLED):
 #ifdef HAVE_TX_MQ
-       f->mask = 0x7 << 3;
-       adapter->num_rx_queues = f->indices;
-       adapter->num_tx_queues = f->indices;
-       ret = true;
+                       dcb_m = 0x7 << 3;
+                       nrq = dcb_i;
+                       ntq = dcb_i;
 #else
-       DPRINTK(DRV, INFO, "Kernel has no multiqueue support, disabling DCB\n");
-       f->mask = 0;
-       f->indices = 0;
+                       DPRINTK(DRV, INFO, "Kernel has no multiqueue "
+                               "support, disabling DCB.\n");
+                       /* Fall back onto RSS */
+                       rss_m = 0xF;
+                       nrq = rss_i;
+                       ntq = 1;
+                       dcb_m = 0;
+                       dcb_i = 0;
 #endif
-
-       return ret;
-}
-
-/**
- * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices
- * @adapter: board private structure to initialize
- *
- * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues
- * and VM pools where appropriate.  If RSS is available, then also try and
- * enable RSS and map accordingly.
- *
- **/
-static inline bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter)
-{
-       int vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
-       int vmdq_m = 0;
-       int rss_i = adapter->ring_feature[RING_F_RSS].indices;
-       int rss_m = adapter->ring_feature[RING_F_RSS].mask;
-       unsigned long i;
-       int rss_shift;
-       bool ret = false;
-
-       switch (adapter->flags &
-           (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
-
-       case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       vmdq_i = min(IXGBE_MAX_VMDQ_INDICES, vmdq_i);
-                       if (vmdq_i > 32)
-                               rss_i = 2;
-                       else
-                               rss_i = 4;
-                       i = rss_i;
-                       rss_shift = find_first_bit(&i, sizeof(i) * 8);
-                       rss_m = (rss_i - 1);
-                       vmdq_m = ((IXGBE_MAX_VMDQ_INDICES - 1) <<
-                                  rss_shift) & (MAX_RX_QUEUES - 1);
-               }
-               adapter->num_rx_queues = vmdq_i * rss_i;
-               adapter->num_tx_queues = min(MAX_TX_QUEUES, vmdq_i * rss_i);
-               ret = true;
-               break;
-
-       case (IXGBE_FLAG_VMDQ_ENABLED):
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1) << 1;
-               else
-                       vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1);
-               adapter->num_rx_queues = vmdq_i;
-               adapter->num_tx_queues = vmdq_i;
-               ret = true;
-               break;
-
-       default:
-               ret = false;
-               goto vmdq_queues_out;
-       }
-
-       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
-               adapter->num_rx_pools = vmdq_i;
-               adapter->num_rx_queues_per_pool = adapter->num_rx_queues /
-                                                 vmdq_i;
-       } else {
-               adapter->num_rx_pools = adapter->num_rx_queues;
-               adapter->num_rx_queues_per_pool = 1;
-       }
-       /* save the mask for later use */
-       adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
-vmdq_queues_out:
-       return ret;
-}
-
-/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
- * @adapter: board private structure to initialize
- *
- * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
- *
- **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
-{
-       bool ret = false;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               f->mask = 0xF;
-               adapter->num_rx_queues = f->indices;
+                       break;
+               case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
+                       vmdq_i = min(4, vmdq_i);
+                       vmdq_m = 0x3 << 3;
+                       rss_m = 0xF;
+                       nrq = vmdq_i * rss_i;
+                       ntq = min(MAX_TX_QUEUES, vmdq_i * rss_i);
+                       break;
+               case (IXGBE_FLAG_VMDQ_ENABLED):
+                       vmdq_m = 0xF;
+                       nrq = vmdq_i;
+                       ntq = vmdq_i;
+                       break;
+               case (IXGBE_FLAG_RSS_ENABLED):
+                       rss_m = 0xF;
+                       nrq = rss_i;
 #ifdef HAVE_TX_MQ
-               adapter->num_tx_queues = f->indices;
+                       ntq = rss_i;
+#else
+                       ntq = 1;
 #endif
-               ret = true;
-       }
-
-       return ret;
-}
-
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session.  This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-       bool ret = false;
-       struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+                       break;
+               case 0:
+               default:
+                       dcb_i = 0;
+                       dcb_m = 0;
+                       rss_i = 0;
+                       rss_m = 0;
+                       vmdq_i = 0;
+                       vmdq_m = 0;
+                       nrq = 1;
+                       ntq = 1;
+                       break;
+               }
 
-       f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
-       f_fdir->mask = 0;
+               /* sanity check, we should never have zero queues */
+               nrq = (nrq ?:1);
+               ntq = (ntq ?:1);
 
-       /* Flow Director must have RSS enabled */
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
-               adapter->num_rx_queues = f_fdir->indices;
-#ifdef HAVE_TX_MQ
-               adapter->num_tx_queues = f_fdir->indices;
-#endif
-               ret = true;
-       } else {
-               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+               adapter->ring_feature[RING_F_DCB].indices = dcb_i;
+               adapter->ring_feature[RING_F_DCB].mask = dcb_m;
+               adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+               adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+               adapter->ring_feature[RING_F_RSS].indices = rss_i;
+               adapter->ring_feature[RING_F_RSS].mask = rss_m;
+               break;
+       default:
+               nrq = 1;
+               ntq = 1;
+               break;
        }
-       return ret;
-}
-
-/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
- * @adapter: board private structure to initialize
- *
- * This is the top level queue allocation routine.  The order here is very
- * important, starting with the "most" number of features turned on at once,
- * and ending with the smallest set of features.  This way large combinations
- * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
- *
- **/
-static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
-       /* Start with base case */
-       adapter->num_rx_queues = 1;
-       adapter->num_tx_queues = 1;
-       adapter->num_rx_pools = adapter->num_rx_queues;
-       adapter->num_rx_queues_per_pool = 1;
 
-       if (ixgbe_set_vmdq_queues(adapter))
-               return;
-
-       if (ixgbe_set_dcb_queues(adapter))
-               return;
-
-       if (ixgbe_set_fdir_queues(adapter))
-               return;
-
-
-       if (ixgbe_set_rss_queues(adapter))
-               return;
+       adapter->num_rx_queues = nrq;
+       adapter->num_tx_queues = ntq;
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -4114,222 +3307,131 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
+               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+               adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+               ixgbe_set_num_queues(adapter);
        } else {
                adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
-               /*
-                * Adjust for only the vectors we'll use, which is minimum
-                * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
-                * vectors we were allocated.
-                */
-               adapter->num_msix_vectors = min(vectors,
-                                  adapter->max_msix_q_vectors + NON_Q_VECTORS);
+               adapter->num_msix_vectors = vectors;
        }
 }
 
 /**
- * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for RSS to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
-{
-       int i;
-
-       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
-               return false;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               adapter->rx_ring[i].reg_idx = i;
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               adapter->tx_ring[i].reg_idx = i;
-
-       return true;
-}
-
-/**
- * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
  *
- * Cache the descriptor ring offsets for DCB to the assigned rings.
- *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
  **/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 {
-       int i;
-       bool ret = false;
-       int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
-
-       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-               return false;
+       int feature_mask = 0, rss_i;
+       int i, txr_idx, rxr_idx;
+       int dcb_i;
+       int vmdq_i, k;
 
-       /* the number of queues is assumed to be symmetric */
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               for (i = 0; i < dcb_i; i++) {
-                       adapter->rx_ring[i].reg_idx = i << 3;
-                       adapter->tx_ring[i].reg_idx = i << 2;
-               }
-               ret = true;
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               if (dcb_i == 8) {
-                       /*
-                        * Tx TC0 starts at: descriptor queue 0
-                        * Tx TC1 starts at: descriptor queue 32
-                        * Tx TC2 starts at: descriptor queue 64
-                        * Tx TC3 starts at: descriptor queue 80
-                        * Tx TC4 starts at: descriptor queue 96
-                        * Tx TC5 starts at: descriptor queue 104
-                        * Tx TC6 starts at: descriptor queue 112
-                        * Tx TC7 starts at: descriptor queue 120
-                        *
-                        * Rx TC0-TC7 are offset by 16 queues each
-                        */
-                       for (i = 0; i < 3; i++) {
-                               adapter->tx_ring[i].reg_idx = i << 5;
-                               adapter->rx_ring[i].reg_idx = i << 4;
+       /* Number of supported queues */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+               vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
+               rss_i = adapter->ring_feature[RING_F_RSS].indices;
+               txr_idx = 0;
+               rxr_idx = 0;
+               feature_mask |= IXGBE_FLAG_DCB_ENABLED;
+               feature_mask |= IXGBE_FLAG_VMDQ_ENABLED;
+               feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+               switch (adapter->flags & feature_mask) {
+               case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED |
+                     IXGBE_FLAG_VMDQ_ENABLED):
+                       for (i = 0; i < dcb_i; i++) {
+                       int j;
+                       for (j = 0; j < vmdq_i; j++) {
+                       for (k = 0; k < rss_i; k++) {
+                               adapter->rx_ring[rxr_idx].reg_idx = i << 3 |
+                                                                   j << 2 |
+                                                                   k;
+                               rxr_idx++;
                        }
-                       for ( ; i < 5; i++) {
-                               adapter->tx_ring[i].reg_idx = ((i + 2) << 4);
-                               adapter->rx_ring[i].reg_idx = i << 4;
+                               adapter->tx_ring[txr_idx].reg_idx = i << 2 | j;
+                               txr_idx++;
                        }
-                       for ( ; i < dcb_i; i++) {
-                               adapter->tx_ring[i].reg_idx = ((i + 8) << 3);
-                               adapter->rx_ring[i].reg_idx = i << 4;
                        }
-                       ret = true;
-               } else if (dcb_i == 4) {
-                       /*
-                        * Tx TC0 starts at: descriptor queue 0
-                        * Tx TC1 starts at: descriptor queue 64
-                        * Tx TC2 starts at: descriptor queue 96
-                        * Tx TC3 starts at: descriptor queue 112
-                        *
-                        * Rx TC0-TC3 are offset by 32 queues each
-                        */
-                       adapter->tx_ring[0].reg_idx = 0;
-                       adapter->tx_ring[1].reg_idx = 64;
-                       adapter->tx_ring[2].reg_idx = 96;
-                       adapter->tx_ring[3].reg_idx = 112;
-                       for (i = 0 ; i < dcb_i; i++)
-                               adapter->rx_ring[i].reg_idx = i << 5;
-                       ret = true;
-               }
-       }
-
-       return ret;
-}
-
-/**
- * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for VMDq to the assigned rings.  It
- * will also try to cache the proper offsets if RSS is enabled along with
- * VMDq.
- *
- **/
-static inline bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter)
-{
-       int i;
-       bool ret = false;
-
-       switch (adapter->flags &
-           (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
-
-       case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       /* since the # of rss queues per vmdq pool is
-                        * limited to either 2 or 4, there is no index
-                        * skipping and we can set them up with no
-                        * funky mapping
-                        */
+                       break;
+               case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED):
+                       for (i = 0; i < dcb_i; i++) {
+                       int j;
+                       for (j = 0; j < vmdq_i; j++) {
+                               adapter->rx_ring[rxr_idx].reg_idx = i << 3 | j;
+                               adapter->tx_ring[txr_idx].reg_idx = i << 2 |
+                                                                   (j >> 1);
+                               rxr_idx++;
+                               if (j & 1)
+                                       txr_idx++;
+                       }
+                       }
+                       break;
+               case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
+                       for (i = 0; i < dcb_i; i++) {
+                       int j;
+                       /* Rx first */
+                       for (j = 0; j < adapter->num_rx_queues; j++) {
+                               adapter->rx_ring[rxr_idx].reg_idx = i << 3 | j;
+                               rxr_idx++;
+                       }
+                       /* Tx now */
+                       for (j = 0; j < adapter->num_tx_queues; j++) {
+                               adapter->tx_ring[txr_idx].reg_idx = i << 2 |
+                                                                   (j >> 1);
+                               if (j & 1)
+                                       txr_idx++;
+                       }
+                       }
+                       break;
+               case (IXGBE_FLAG_DCB_ENABLED):
+                       /* the number of queues is assumed to be symmetric */
+                       for (i = 0; i < dcb_i; i++) {
+                               adapter->rx_ring[i].reg_idx = i << 3;
+                               adapter->tx_ring[i].reg_idx = i << 2;
+                       }
+                       break;
+               case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
+                       for (i = 0; i < vmdq_i; i++) {
+                       int j;
+                       for (j = 0; j < rss_i; j++) {
+                               adapter->rx_ring[rxr_idx].reg_idx = i << 4 | j;
+                               adapter->tx_ring[txr_idx].reg_idx = i << 3 |
+                                                                   (j >> 1);
+                               rxr_idx++;
+                               if (j & 1)
+                                       txr_idx++;
+                       }
+                       }
+                       break;
+               case (IXGBE_FLAG_VMDQ_ENABLED):
                        for (i = 0; i < adapter->num_rx_queues; i++)
                                adapter->rx_ring[i].reg_idx = i;
-                       for (i = 0; i < adapter->num_tx_queues; i++)
+                       for (i = 0;  i < adapter->num_tx_queues; i++)
                                adapter->tx_ring[i].reg_idx = i;
-                       ret = true;
-               }
-               break;
-
-       case (IXGBE_FLAG_VMDQ_ENABLED):
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       break;
+               case (IXGBE_FLAG_RSS_ENABLED):
                        for (i = 0; i < adapter->num_rx_queues; i++)
                                adapter->rx_ring[i].reg_idx = i;
                        for (i = 0; i < adapter->num_tx_queues; i++)
                                adapter->tx_ring[i].reg_idx = i;
-                       ret = true;
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       /* even without rss, there are 2 queues per
-                        * pool, the odd numbered ones are unused.
-                        */
-                       for (i = 0; i < adapter->num_rx_queues; i++)
-                               adapter->rx_ring[i].reg_idx = i * 2;
-                       for (i = 0; i < adapter->num_tx_queues; i++)
-                               adapter->tx_ring[i].reg_idx = i * 2;
-                       ret = true;
+                       break;
+               case 0:
+               default:
+                       break;
                }
                break;
+       default:
+               break;
        }
-
-       return ret;
-}
-
-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
-       int i;
-       bool ret = false;
-
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i].reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].reg_idx = i;
-               ret = true;
-       }
-
-       return ret;
 }
 
-/**
- * ixgbe_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- *
- * Note, the order the various feature calls is important.  It must start with
- * the "most" features enabled at the same time, then trickle down to the
- * least amount of features turned on at once.
- **/
-static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
-{
-       /* start with default case */
-       adapter->rx_ring[0].reg_idx = 0;
-       adapter->tx_ring[0].reg_idx = 0;
-
-       if (ixgbe_cache_ring_vmdq(adapter))
-               return;
-
-       if (ixgbe_cache_ring_dcb(adapter))
-               return;
-
-       if (ixgbe_cache_ring_fdir(adapter))
-               return;
-
-       if (ixgbe_cache_ring_rss(adapter))
-               return;
-
-}
 
 /**
  * ixgbe_alloc_queues - Allocate memory for all rings
@@ -4350,15 +3452,12 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 
        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct ixgbe_ring), GFP_KERNEL);
-
        if (!adapter->rx_ring)
                goto err_rx_ring_allocation;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].count = adapter->tx_ring_count;
                adapter->tx_ring[i].queue_index = i;
-               adapter->tx_ring[i].atr_sample_rate = adapter->atr_sample_rate;
-               adapter->tx_ring[i].atr_count = 0;
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -4385,7 +3484,6 @@ err_tx_ring_allocation:
  **/
 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
        int err = 0;
        int vector, v_budget;
 
@@ -4403,36 +3501,42 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
        /*
         * At the same time, hardware can only support a maximum of
-        * hw.mac->max_msix_vectors vectors.  With features
-        * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
-        * descriptor queues supported by our device.  Thus, we cap it off in
-        * those rare cases where the cpu count also exceeds our vector limit.
+        * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
+        * we can easily reach upwards of 64 Rx descriptor queues and
+        * 32 Tx queues.  Thus, we cap it off in those rare cases where
+        * the cpu count also exceeds our vector limit.
         */
-       v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
+       v_budget = min(v_budget, MAX_MSIX_COUNT);
 
        /* A failure in MSI-X entry allocation isn't fatal, but it does
         * mean we disable MSI-X capabilities of the adapter. */
        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
-       if (adapter->msix_entries) {
-               for (vector = 0; vector < v_budget; vector++)
-                       adapter->msix_entries[vector].entry = vector;
-
-               ixgbe_acquire_msix_vectors(adapter, v_budget);
-
-               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+       if (!adapter->msix_entries) {
+               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+               adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+               ixgbe_set_num_queues(adapter);
+               kfree(adapter->tx_ring);
+               kfree(adapter->rx_ring);
+               err = ixgbe_alloc_queues(adapter);
+               if (err) {
+                       DPRINTK(PROBE, ERR, "Unable to allocate memory "
+                                           "for queues\n");
                        goto out;
+               }
 
+               goto try_msi;
        }
 
-       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-       adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
-       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-       adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-       adapter->atr_sample_rate = 0;
-       adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-       adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-       ixgbe_set_num_queues(adapter);
+       for (vector = 0; vector < v_budget; vector++)
+               adapter->msix_entries[vector].entry = vector;
+
+       ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               goto out;
 
 try_msi:
        if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE))
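
The net effect of the hunk above is a simple fallback ladder: if the MSI-X
entry table cannot even be allocated, the DCB/VMDq/RSS features are switched
off, the queue counts collapse back to a single Tx/Rx pair, the rings are
reallocated, and the driver drops through to MSI (and ultimately legacy
interrupts).  A rough sketch of that decision, not the driver code itself:

    enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY };

    /* msix_ok: MSI-X entries allocated and vectors acquired;
     * msi_capable: adapter advertises plain MSI support. */
    static enum irq_mode pick_irq_mode(int msix_ok, int msi_capable)
    {
            if (msix_ok)
                    return IRQ_MSIX;        /* one vector per queue */
            /* no MSI-X: single queue pair, no DCB/VMDq/RSS */
            return msi_capable ? IRQ_MSI : IRQ_LEGACY;
    }
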
@@ -4450,130 +3554,17 @@ try_msi:
 
 out:
 #ifdef HAVE_TX_MQ
-       /* Notify the stack of the (possibly) reduced Tx Queue count. */
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
+       /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
-#else
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
-#endif
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 #endif /* HAVE_TX_MQ */
        return err;
 }
 
-/**
- * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
- * return -ENOMEM.
- **/
-static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
-{
-       int v_idx, num_q_vectors;
-       struct ixgbe_q_vector *q_vector;
-       int rx_vectors;
-#ifdef CONFIG_IXGBE_NAPI
-       int (*poll)(struct napi_struct *, int);
-#endif
-
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-               rx_vectors = adapter->num_rx_queues;
-#ifdef CONFIG_IXGBE_NAPI
-               poll = &ixgbe_clean_rxtx_many;
-#endif
-       } else {
-               num_q_vectors = 1;
-               rx_vectors = 1;
-#ifdef CONFIG_IXGBE_NAPI
-               poll = &ixgbe_poll;
-#endif
-       }
-
-       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
-               if (!q_vector)
-                       goto err_out;
-               q_vector->adapter = adapter;
-               q_vector->eitr = adapter->eitr_param;
-               q_vector->v_idx = v_idx;
-#ifndef IXGBE_NO_LRO
-               if (v_idx < rx_vectors) {
-                       int size = sizeof(struct ixgbe_lro_list);
-                       q_vector->lrolist = vmalloc(size);
-                       if (!q_vector->lrolist) {
-                               kfree(q_vector);
-                               goto err_out;
-                       }
-                       memset(q_vector->lrolist, 0, size);
-                       ixgbe_lro_ring_init(q_vector->lrolist);
-               }
-#endif
-#ifdef CONFIG_IXGBE_NAPI
-               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
-#endif
-               adapter->q_vector[v_idx] = q_vector;
-       }
-
-       return 0;
-
-err_out:
-       while (v_idx) {
-               v_idx--;
-               q_vector = adapter->q_vector[v_idx];
-#ifdef CONFIG_IXGBE_NAPI
-               netif_napi_del(&q_vector->napi);
-#endif
-#ifndef IXGBE_NO_LRO
-               if (q_vector->lrolist) {
-                       ixgbe_lro_ring_exit(q_vector->lrolist);
-                       vfree(q_vector->lrolist);
-                       q_vector->lrolist = NULL;
-               }
-#endif
-               kfree(q_vector);
-               adapter->q_vector[v_idx] = NULL;
-       }
-       return -ENOMEM;
-}
-
-/**
- * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors.  In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- **/
-static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
-{
-       int v_idx, num_q_vectors;
-
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       } else {
-               num_q_vectors = 1;
-       }
-
-       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
-
-               adapter->q_vector[v_idx] = NULL;
-#ifdef CONFIG_IXGBE_NAPI
-               netif_napi_del(&q_vector->napi);
-#endif
-#ifndef IXGBE_NO_LRO
-               if (q_vector->lrolist) {
-                       ixgbe_lro_ring_exit(q_vector->lrolist);
-                       vfree(q_vector->lrolist);
-                       q_vector->lrolist = NULL;
-               }
-#endif
-               kfree(q_vector);
-       }
-}
-
-static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 {
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
@@ -4604,25 +3595,18 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
        /* Number of supported queues */
        ixgbe_set_num_queues(adapter);
 
-       err = ixgbe_set_interrupt_capability(adapter);
-       if (err) {
-               DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
-               goto err_set_interrupt;
-       }
-
-       err = ixgbe_alloc_q_vectors(adapter);
-       if (err) {
-               DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
-                       "vectors\n");
-               goto err_alloc_q_vectors;
-       }
-
        err = ixgbe_alloc_queues(adapter);
        if (err) {
                DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }
 
+       err = ixgbe_set_interrupt_capability(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+               goto err_set_interrupt;
+       }
+
        DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
                           "Tx Queue count = %u\n",
                (adapter->num_rx_queues > 1) ? "Enabled" :
@@ -4631,30 +3615,12 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
        set_bit(__IXGBE_DOWN, &adapter->state);
 
        return 0;
-err_alloc_queues:
-       ixgbe_free_q_vectors(adapter);
-err_alloc_q_vectors:
-       ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
-       return err;
-}
 
-/**
- * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
- * @adapter: board private structure to clear interrupt scheme on
- *
- * We go through and clear interrupt specific resources and reset the structure
- * to pre-load conditions
- **/
-void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
-{
+err_set_interrupt:
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
-       adapter->tx_ring = NULL;
-       adapter->rx_ring = NULL;
-
-       ixgbe_free_q_vectors(adapter);
-       ixgbe_reset_interrupt_capability(adapter);
+err_alloc_queues:
+       return err;
 }
 
 /**
@@ -4684,7 +3650,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
        if ((hw->phy.type == ixgbe_phy_nl) &&
            (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
                s32 ret = hw->phy.ops.identify_sfp(hw);
-               if (ret && ret != IXGBE_ERR_SFP_NOT_SUPPORTED)
+               if (ret)
                        goto reschedule;
                ret = hw->phy.ops.reset(hw);
                if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -4693,7 +3659,6 @@ static void ixgbe_sfp_task(struct work_struct *work)
                                "Reload the driver after installing a "
                                "supported module.\n");
                        unregister_netdev(adapter->netdev);
-                       adapter->netdev_registered = false;
                } else {
                        DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
                                hw->phy.sfp_type);
@@ -4731,7 +3696,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        hw->subsystem_device_id = pdev->subsystem_device;
 
        err = ixgbe_init_shared_code(hw);
-       if (err) {
+       if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+               /* start a kernel thread to watch for a module to arrive */
+               set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+               mod_timer(&adapter->sfp_timer,
+                         round_jiffies(jiffies + (2 * HZ)));
+               err = 0;
+       } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               DPRINTK(PROBE, ERR, "failed to load because an "
+                       "unsupported SFP+ module type was detected.\n");
+               goto out;
+       } else if (err) {
                DPRINTK(PROBE, ERR, "init_shared_code failed: %d\n", err);
                goto out;
        }
@@ -4739,7 +3714,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        /* Set capability flags */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               if (hw->device_id == IXGBE_DEV_ID_82598AT)
+               if (hw->mac.ops.get_media_type &&
+                   (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
                adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
@@ -4748,35 +3724,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                        adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
                if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
                        adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
-#ifdef IXGBE_RSS
+#ifdef CONFIG_IXGBE_RSS
                if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
                        adapter->flags |= IXGBE_FLAG_RSS_CAPABLE;
 #endif
                if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
                        adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE;
-#ifndef IXGBE_NO_HW_RSC
-               adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
-#endif
-               adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598;
-               break;
-       case ixgbe_mac_82599EB:
-#ifndef IXGBE_NO_HW_RSC
-               adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
-#endif
-               adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
-               adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
-               adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
-               if (adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)
-                       adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
-               if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
-                       adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
-#ifdef IXGBE_RSS
-               if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
-                       adapter->flags |= IXGBE_FLAG_RSS_CAPABLE;
-#endif
-               if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
-                       adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE;
-               adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599;
                break;
        default:
                break;
@@ -4798,24 +3751,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
                adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
                adapter->dcb_cfg.rx_pba_cfg = pba_equal;
-               adapter->dcb_cfg.pfc_mode_enable = false;
                adapter->dcb_cfg.round_robin_enable = false;
                adapter->dcb_set_bitmap = 0x00;
        }
-#ifdef CONFIG_DCB
-       ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-                          adapter->ring_feature[RING_F_DCB].indices);
-#endif
 
        /* default flow control settings */
-       hw->fc.requested_mode = ixgbe_fc_full;
-       hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
-       adapter->last_lfc_mode = hw->fc.current_mode;
+       hw->fc.current_mode = ixgbe_fc_none;
+       hw->fc.requested_mode = ixgbe_fc_none;
        hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
        hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
-       hw->fc.disable_fc_autoneg = false;
 
        /* set defaults for eitr in MegaBytes */
        adapter->eitr_low = 10;
@@ -4853,7 +3799,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
        memset(tx_ring->tx_buffer_info, 0, size);
 
        /* round up to nearest 4K */
-       tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+       tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
+                       sizeof(u32);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
        tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
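
The extra sizeof(u32) added to the Tx ring allocation above presumably leaves
room for a head write-back word after the descriptors; either way, the size
then gets rounded up to a 4K multiple.  A worked example of the arithmetic,
assuming a ring of 512 descriptors and 16-byte advanced Tx descriptors (both
just example values):

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long size = 512 * 16 + sizeof(unsigned int);  /* 8196  */
            size = ALIGN(size, 4096UL);                            /* 12288 */
            printf("tx ring size = %lu bytes\n", size);
            return 0;
    }
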
@@ -4875,30 +3822,6 @@ err:
 }
 
 /**
- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
-       int i, err = 0;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
-               if (!err)
-                       continue;
-               DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
-               break;
-       }
-       return err;
-}
-
-/**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
@@ -4911,6 +3834,22 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        int size;
 
+#ifndef IXGBE_NO_INET_LRO
+       size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
+       rx_ring->lro_mgr.lro_arr = vmalloc(size);
+       if (!rx_ring->lro_mgr.lro_arr)
+               return -ENOMEM;
+       memset(rx_ring->lro_mgr.lro_arr, 0, size);
+
+#endif /* IXGBE_NO_INET_LRO */
+#ifndef IXGBE_NO_LRO
+       size = sizeof(struct ixgbe_lro_list);
+       rx_ring->lrolist = vmalloc(size);
+       if (!rx_ring->lrolist)
+               return -ENOMEM;
+       memset(rx_ring->lrolist, 0, size);
+
+#endif
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        rx_ring->rx_buffer_info = vmalloc(size);
        if (!rx_ring->rx_buffer_info) {
@@ -4942,36 +3881,36 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
        rx_ring->work_limit = rx_ring->count / 2;
 #endif
 
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+            rx_ring->queue_index) {
+                       rx_ring->active = 0;
+                       rx_ring->allocated = 0;
+       } else {
+#endif
+               rx_ring->active = 1;
+               rx_ring->allocated = 1;
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       }
+#endif
+
+#ifndef IXGBE_NO_LRO
+       ixgbe_lro_ring_init(rx_ring->lrolist, adapter);
+#endif
        return 0;
 alloc_failed:
+#ifndef IXGBE_NO_INET_LRO
+       vfree(rx_ring->lro_mgr.lro_arr);
+       rx_ring->lro_mgr.lro_arr = NULL;
+#endif
+#ifndef IXGBE_NO_LRO
+       vfree(rx_ring->lrolist);
+       rx_ring->lrolist = NULL;
+#endif
        return -ENOMEM;
 }
 
 /**
- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
-       int i, err = 0;
-
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
-               if (!err)
-                       continue;
-               DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
-               break;
-       }
-       return err;
-}
-
-/**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
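
In the Rx setup above, only queue 0 comes up with active/allocated set when
VMDq is enabled under CONFIG_XEN_NETDEV2_VMQ; the other VMDq queues start
parked, apparently so the netchannel2 VMQ code can hand them out later (note
that rx_queues_allocated is also reset to 0 in ixgbe_setup_all_rx_resources
below).  A minimal sketch of that ownership rule, using hypothetical names:

    /* vmdq_enabled and queue_index mirror the fields tested in the patch */
    static void vmq_default_ring_state(int vmdq_enabled, int queue_index,
                                       int *active, int *allocated)
    {
            if (vmdq_enabled && queue_index != 0) {
                    *active = 0;            /* parked until a pool claims it */
                    *allocated = 0;
            } else {
                    *active = 1;            /* default (host) queue */
                    *allocated = 1;
            }
    }
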
@@ -5004,8 +3943,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               if (adapter->tx_ring[i].desc)
-                       ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+               ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
 /**
@@ -5020,6 +3958,16 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
 {
        struct pci_dev *pdev = adapter->pdev;
 
+#ifndef IXGBE_NO_INET_LRO
+       vfree(rx_ring->lro_mgr.lro_arr);
+       rx_ring->lro_mgr.lro_arr = NULL;
+#endif
+#ifndef IXGBE_NO_LRO
+       if (rx_ring->lrolist)
+               ixgbe_lro_ring_exit(rx_ring->lrolist);
+       vfree(rx_ring->lrolist);
+       rx_ring->lrolist = NULL;
+#endif
        ixgbe_clean_rx_ring(adapter, rx_ring);
 
        vfree(rx_ring->rx_buffer_info);
@@ -5041,8 +3989,58 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               if (adapter->rx_ring[i].desc)
-                       ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+               ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+       int i, err = 0;
+
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               if (!err)
+                       continue;
+               DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+               break;
+       }
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       adapter->rx_queues_allocated = 0;
+#endif
+       return err;
+}
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+       int i, err = 0;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               if (!err)
+                       continue;
+               DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+               break;
+       }
+       return err;
 }
 
 /**
@@ -5061,6 +4059,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
                return -EINVAL;
 
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       /* Jumbo frames not currently supported in VMDq mode under Xen */
+       if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
+           (max_frame > ETH_FRAME_LEN))
+               return -EINVAL;
+#endif
        DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
                netdev->mtu, new_mtu);
        /* must set new MTU before calling down or up */
@@ -5087,7 +4091,6 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 static int ixgbe_open(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
        int err;
 
        /* disallow open during test */
@@ -5106,48 +4109,26 @@ static int ixgbe_open(struct net_device *netdev)
 
        ixgbe_configure(adapter);
 
-       /*
-        * Map the Tx/Rx rings to the vectors we were allotted.
-        * if request_irq will be called in this function map_rings
-        * must be called *before* up_complete
-        */
-       ixgbe_map_rings_to_vectors(adapter);
-
-       err = ixgbe_up_complete(adapter);
-       if (err)
-               goto err_setup_rx;
-
-       /* clear any pending interrupts, may auto mask */
-       IXGBE_READ_REG(hw, IXGBE_EICR);
-
        err = ixgbe_request_irq(adapter);
        if (err)
                goto err_req_irq;
 
-       ixgbe_irq_enable(adapter, true, true);
-
-       /*
-        * If this adapter has a fan, check to see if we had a failure
-        * before we enabled the interrupt.
-        */
-       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
-               u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-               if (esdp & IXGBE_ESDP_SDP1)
-                       DPRINTK(DRV, CRIT,
-                               "Fan has stopped, replace the adapter\n");
-       }
+       err = ixgbe_up_complete(adapter);
+       if (err)
+               goto err_up;
 
+       netif_tx_start_all_queues(netdev);
 
        return 0;
 
-err_req_irq:
-       ixgbe_down(adapter);
+err_up:
        ixgbe_release_hw_control(adapter);
        ixgbe_free_irq(adapter);
-err_setup_rx:
+err_req_irq:
        ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
        ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
        ixgbe_reset(adapter);
 
        return err;
@@ -5179,6 +4160,52 @@ static int ixgbe_close(struct net_device *netdev)
        return 0;
 }
 
+#ifdef CONFIG_IXGBE_NAPI
+/**
+ * ixgbe_napi_add_all - prep napi structs for use
+ * @adapter: private struct
+ *
+ * helper function to napi_add each possible q_vector->napi
+ */
+void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
+{
+       int q_idx, q_vectors;
+       int (*poll)(struct napi_struct *, int);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               poll = &ixgbe_clean_rxonly;
+               /* Only enable as many vectors as we have rx queues. */
+               q_vectors = adapter->num_rx_queues;
+       } else {
+               poll = &ixgbe_poll;
+               /* only one q_vector for legacy modes */
+               q_vectors = 1;
+       }
+
+       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
+       }
+}
+
+void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
+{
+       int q_idx;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+       /* legacy and MSI only use one vector */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+               q_vectors = 1;
+
+       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+               if (!q_vector->rxr_count)
+                       continue;
+               netif_napi_del(&q_vector->napi);
+       }
+}
+
+#endif
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
@@ -5196,7 +4223,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
 
-       pci_wake_from_d3(pdev, false);
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
 
        err = ixgbe_init_interrupt_scheme(adapter);
        if (err) {
@@ -5205,9 +4233,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
                return err;
        }
 
-       ixgbe_reset(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+       ixgbe_napi_add_all(adapter);
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+#endif
+       ixgbe_reset(adapter);
 
        if (netif_running(netdev)) {
                err = ixgbe_open(adapter->netdev);
@@ -5219,14 +4249,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        return 0;
 }
+
 #endif /* CONFIG_PM */
-static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 ctrl, fctrl;
-       u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
        int retval = 0;
 #endif
@@ -5239,84 +4267,40 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                ixgbe_free_all_tx_resources(adapter);
                ixgbe_free_all_rx_resources(adapter);
        }
+       ixgbe_reset_interrupt_capability(adapter);
 
-       ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+       ixgbe_napi_del_all(adapter);
+#endif
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
-
 #endif
-       if (wufc) {
-               ixgbe_set_rx_mode(netdev);
-
-               /* turn on all-multi mode if wake on multicast is enabled */
-               if (wufc & IXGBE_WUFC_MC) {
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl |= IXGBE_FCTRL_MPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-               }
-
-               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-               ctrl |= IXGBE_CTRL_GIO_DIS;
-               IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
-
-               IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
-       } else {
-               IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
-               IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
-       }
-
-       if (wufc && hw->mac.type == ixgbe_mac_82599EB)
-               pci_wake_from_d3(pdev, true);
-       else
-               pci_wake_from_d3(pdev, false);
 
-       *enable_wake = !!wufc;
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
 
        ixgbe_release_hw_control(adapter);
 
        pci_disable_device(pdev);
 
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       int retval;
-       bool wake;
-
-       retval = __ixgbe_shutdown(pdev, &wake);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
 }
-#endif /* CONFIG_PM */
 
 #ifndef USE_REBOOT_NOTIFIER
 static void ixgbe_shutdown(struct pci_dev *pdev)
 {
-       bool wake;
-
-       __ixgbe_shutdown(pdev, &wake);
-
-       if (system_state == SYSTEM_POWER_OFF) {
-               pci_wake_from_d3(pdev, wake);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
+       ixgbe_suspend(pdev, PMSG_SUSPEND);
 }
 
 #endif
+
 /**
  * ixgbe_update_stats - Update the board statistics counters.
  * @adapter: board private structure
@@ -5326,37 +4310,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-#ifndef IXGBE_NO_LRO
-       u32 flushed = 0, coal = 0, recycled = 0;
-       int num_q_vectors = 1;
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-#endif
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               u64 rsc_count = 0;
-               for (i = 0; i < 16; i++)
-                       adapter->hw_rx_no_dma_resources += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       rsc_count += adapter->rx_ring[i].rsc_count;
-               adapter->rsc_count = rsc_count;
-       }
-
-#ifndef IXGBE_NO_LRO
-       for (i = 0; i < num_q_vectors; i++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-               if (!q_vector || !q_vector->lrolist)
-                       continue;
-               flushed += q_vector->lrolist->stats.flushed;
-               coal += q_vector->lrolist->stats.coal;
-               recycled += q_vector->lrolist->stats.recycled;
-       }
-       adapter->lro_stats.flushed = flushed;
-       adapter->lro_stats.coal = coal;
-       adapter->lro_stats.recycled = recycled;
-
-#endif
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
                /* for packet buffers not used, the register should read 0 */
@@ -5364,52 +4318,32 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                missed_rx += mpc;
                adapter->stats.mpc[i] += mpc;
                total_mpc += adapter->stats.mpc[i];
-               if (hw->mac.type == ixgbe_mac_82598EB)
-                       adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+               adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
                adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
                adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
-                                                           IXGBE_PXONRXCNT(i));
-                       adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
-                                                          IXGBE_PXOFFRXCNT(i));
-               } else {
-                       adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
-                                                             IXGBE_PXONRXC(i));
-                       adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
-                                                            IXGBE_PXOFFRXC(i));
-               }
+               adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
+                                                           IXGBE_PXONRXC(i));
+               adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
+                                                           IXGBE_PXONTXC(i));
+               adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
+                                                            IXGBE_PXOFFRXC(i));
+               adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
+                                                            IXGBE_PXOFFTXC(i));
        }
        adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
        /* work around hardware counting issue */
        adapter->stats.gprc -= missed_rx;
 
        /* 82598 hardware only has a 32 bit counter in the high register */
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
-               adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
-               adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
-               adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-               adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-               adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-               adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-       } else {
-               adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-               adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-               adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-               adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-               adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
-       }
+       adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+       adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+       adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        adapter->stats.bprc += bprc;
        adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               adapter->stats.mprc -= bprc;
+       adapter->stats.mprc -= bprc;
        adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
        adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
        adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
@@ -5418,6 +4352,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
        adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+       adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+       adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        adapter->stats.lxontxc += lxon;
        lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
@@ -5465,135 +4401,31 @@ static void ixgbe_watchdog(unsigned long data)
 {
        struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
-       u64 eics = 0;
-       int i;
 
-       /*
-        * Do the watchdog outside of interrupt context due to the lovely
-        * delays that some of the newer hardware requires
-        */
-
-       if (test_bit(__IXGBE_DOWN, &adapter->state))
-               goto watchdog_short_circuit;
-
-
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-               /*
-                * for legacy and MSI interrupts don't set any bits
-                * that are enabled for EIAM, because this operation
-                * would set *both* EIMS and EICS for any bit in EIAM
-                */
-               IXGBE_WRITE_REG(hw, IXGBE_EICS,
-                       (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-               goto watchdog_reschedule;
-       }
-
-       /* get one bit for every active tx/rx interrupt vector */
-       for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
-               struct ixgbe_q_vector *qv = adapter->q_vector[i];
-               if (qv->rxr_count || qv->txr_count)
-                       eics |= ((u64)1 << i);
+       /* Do the watchdog outside of interrupt context due to the lovely
+        * delays that some of the newer hardware requires */
+       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+               /* Cause software interrupt to ensure rx rings are cleaned */
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                       u32 eics =
+                        (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
+                       IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
+               } else {
+                       /* for legacy and MSI interrupts don't set any bits that
+                        * are enabled for EIAM, because this operation would
+                        * set *both* EIMS and EICS for any bit in EIAM */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICS,
+                                    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+               }
+               /* Reset the timer */
+               mod_timer(&adapter->watchdog_timer,
+                         round_jiffies(jiffies + 2 * HZ));
        }
 
-       /* Cause software interrupt to ensure rings are cleaned */
-       ixgbe_irq_rearm_queues(adapter, eics);
-       
-watchdog_reschedule:
-       /* Reset the timer */
-       mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
-
-watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
 }
 
 /**
- * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
- * @work: pointer to work_struct containing our data
- **/
-static void ixgbe_multispeed_fiber_task(struct work_struct *work)
-{
-       struct ixgbe_adapter *adapter = container_of(work,
-                                                    struct ixgbe_adapter,
-                                                    multispeed_fiber_task);
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 autoneg;
-
-       adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
-       if (hw->mac.ops.get_link_capabilities)
-               hw->mac.ops.get_link_capabilities(hw, &autoneg,
-                                                 &hw->mac.autoneg);
-       if (hw->mac.ops.setup_link_speed)
-               hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
-       adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-       adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
-}
-
-/**
- * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
- * @work: pointer to work_struct containing our data
- **/
-static void ixgbe_sfp_config_module_task(struct work_struct *work)
-{
-       struct ixgbe_adapter *adapter = container_of(work,
-                                                    struct ixgbe_adapter,
-                                                    sfp_config_module_task);
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 err;
-
-       adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
-       err = hw->phy.ops.identify_sfp(hw);
-       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "failed to load because an "
-                       "unsupported SFP+ module type was detected.\n");
-               unregister_netdev(adapter->netdev);
-               adapter->netdev_registered = false;
-               return;
-       }
-       /*
-        * A module may be identified correctly, but the EEPROM may not have
-        * support for that module.  setup_sfp() will fail in that case, so
-        * we should not allow that module to load.
-        */
-       err = hw->mac.ops.setup_sfp(hw);
-       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "failed to load because an "
-                       "unsupported SFP+ module type was detected.\n");
-               unregister_netdev(adapter->netdev);
-               adapter->netdev_registered = false;
-               return;
-       }
-
-       if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
-               /* This will also work for DA Twinax connections */
-               schedule_work(&adapter->multispeed_fiber_task);
-       adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
-}
-
-/**
- * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
- * @work: pointer to work_struct containing our data
- **/
-static void ixgbe_fdir_reinit_task(struct work_struct *work)
-{
-       struct ixgbe_adapter *adapter = container_of(work,
-                                                    struct ixgbe_adapter,
-                                                    fdir_reinit_task);
-       struct ixgbe_hw *hw = &adapter->hw;
-       int i;
-
-       if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       set_bit(__IXGBE_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i].reinit_state));
-       } else {
-               DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
-                       "ignored adding FDIR ATR filters \n");
-       }
-       /* Done FDIR Re-initialization, enable transmits */
-       netif_tx_start_all_queues(adapter->netdev);
-}
-
-/**
  * ixgbe_watchdog_task - worker thread to bring link up
  * @work: pointer to work_struct containing our data
  **/
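
The MSI-X branch of the watchdog above builds its EICS value as a mask with
one bit per queue vector.  A worked example with assumed vector counts (the
9/1 split below is illustrative; the value of NON_Q_VECTORS is not shown in
this hunk):

    #include <stdio.h>

    int main(void)
    {
            int num_msix_vectors = 9;   /* assumed: 8 queue vectors + 1 other */
            int non_q_vectors = 1;      /* assumed NON_Q_VECTORS value */
            unsigned int eics = (1u << (num_msix_vectors - non_q_vectors)) - 1;

            printf("eics = 0x%x\n", eics);  /* 0xff: bits 0-7, one per queue */
            return 0;
    }
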
@@ -5606,9 +4438,6 @@ static void ixgbe_watchdog_task(struct work_struct *work)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = adapter->link_speed;
        bool link_up = adapter->link_up;
-       int i;
-       struct ixgbe_ring *tx_ring;
-       int some_tx_pending = 0;
 
        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
@@ -5620,20 +4449,11 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                        link_speed = IXGBE_LINK_SPEED_10GB_FULL;
                        link_up = true;
                }
-               if (link_up) {
-                       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                               for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-                                       hw->mac.ops.fc_enable(hw, i);
-                       } else {
-                               hw->mac.ops.fc_enable(hw, 0);
-                       }
-               }
-
                if (link_up ||
                    time_after(jiffies, (adapter->link_check_timeout +
                                         IXGBE_TRY_LINK_TIMEOUT))) {
-                       adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+                       adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                }
                adapter->link_up = link_up;
                adapter->link_speed = link_speed;
@@ -5641,28 +4461,19 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
-                       bool flow_rx, flow_tx;
-
-                       if (hw->mac.type == ixgbe_mac_82599EB) {
-                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-                               flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
-                               flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
-                       } else {
-                               u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                               u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
-                               flow_rx = (frctl & IXGBE_FCTRL_RFCE);
-                               flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
-                       }
+                       u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                       u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
+#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
+#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
                        DPRINTK(LINK, INFO, "NIC Link is Up %s, "
                                "Flow Control: %s\n",
                                (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
                                 "10 Gbps" :
                                 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
                                  "1 Gbps" : "unknown speed")),
-                               ((flow_rx && flow_tx) ? "RX/TX" :
-                                (flow_rx ? "RX" :
-                                (flow_tx ? "TX" : "None"))));
+                               ((FLOW_RX && FLOW_TX) ? "RX/TX" :
+                                (FLOW_RX ? "RX" :
+                                (FLOW_TX ? "TX" : "None"))));
 
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
@@ -5680,33 +4491,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                }
        }
 
-       if (!netif_carrier_ok(netdev)) {
-               for (i = 0; i < adapter->num_tx_queues; i++) {
-                       tx_ring = &adapter->tx_ring[i];
-                       if (tx_ring->next_to_use != tx_ring->next_to_clean) {
-                               some_tx_pending = 1;
-                               break;
-                       }
-               }
-
-               if (some_tx_pending) {
-                       /* We've lost link, so the controller stops DMA,
-                        * but we've got queued Tx work that's never going
-                        * to get done, so reset controller to flush Tx.
-                        * (Do the reset outside of interrupt context).
-                        */
-                        schedule_work(&adapter->reset_task);
-               }
-       }
-
        ixgbe_update_stats(adapter);
        adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
-
-       if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
-               /* poll faster when waiting for link */
-               mod_timer(&adapter->watchdog_timer, jiffies + (HZ/10));
-       }
-
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring,
@@ -5740,11 +4526,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring,
                        adapter->hw_tso_ctxt++;
 #ifdef NETIF_F_TSO6
                } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                           ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                            &ipv6_hdr(skb)->daddr,
-                                            0, IPPROTO_TCP, 0);
+                       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+                       ipv6h->payload_len = 0;
+                       tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6h->saddr,
+                                                              &ipv6h->daddr,
+                                                              0, IPPROTO_TCP,
+                                                              0);
                        adapter->hw_tso6_ctxt++;
 #endif
                }
@@ -5876,35 +4663,33 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-                        struct ixgbe_ring *tx_ring,
-                        struct sk_buff *skb, u32 tx_flags,
+                        struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                         unsigned int first)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int len;
-       unsigned int total = skb->len;
+       unsigned int len = skb->len;
        unsigned int offset = 0, size, count = 0, i;
 #ifdef MAX_SKB_FRAGS
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
+
+       len -= skb->data_len;
 #endif
 
        i = tx_ring->next_to_use;
 
-       len = min(skb_headlen(skb), total);
        while (len) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
                tx_buffer_info->length = size;
                tx_buffer_info->dma = pci_map_single(adapter->pdev,
-                                                    skb->data + offset,
-                                                    size, PCI_DMA_TODEVICE);
+                                                    skb->data + offset, size,
+                                                    PCI_DMA_TODEVICE);
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
                len -= size;
-               total -= size;
                offset += size;
                count++;
                i++;
@@ -5917,7 +4702,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                struct skb_frag_struct *frag;
 
                frag = &skb_shinfo(skb)->frags[f];
-               len = min( (unsigned int)frag->size, total);
+               len = frag->size;
                offset = frag->page_offset;
 
                while (len) {
@@ -5926,24 +4711,21 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                        tx_buffer_info->length = size;
                        tx_buffer_info->dma = pci_map_page(adapter->pdev,
-                                                          frag->page,
-                                                          offset,
+                                                          frag->page, offset,
                                                           size,
                                                           PCI_DMA_TODEVICE);
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
 
                        len -= size;
-                       total -= size;
                        offset += size;
                        count++;
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
-               if (total == 0)
-                       break;
        }
+
 #endif
        if (i == 0)
                i = tx_ring->count - 1;
@@ -5988,6 +4770,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
                olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
                                 IXGBE_ADVTXD_POPTS_SHIFT;
+
        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
        i = tx_ring->next_to_use;
@@ -6017,64 +4800,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags)
-{
-       /* Right now, we support IPv4 only */
-       struct ixgbe_atr_input atr_input;
-       struct tcphdr *th;
-       struct udphdr *uh;
-       struct iphdr *iph = ip_hdr(skb);
-       struct ethhdr *eth = (struct ethhdr *)skb->data;
-       u16 vlan_id, src_port, dst_port, flex_bytes;
-       u32 src_ipv4_addr, dst_ipv4_addr;
-       u8 l4type = 0;
-
-       /* check if we're UDP or TCP */
-       if (iph->protocol == IPPROTO_TCP) {
-               th = tcp_hdr(skb);
-               src_port = th->source;
-               dst_port = th->dest;
-               l4type |= IXGBE_ATR_L4TYPE_TCP;
-               /* l4type IPv4 type is 0, no need to assign */
-       } else if(iph->protocol == IPPROTO_UDP) {
-               uh = udp_hdr(skb);
-               src_port = uh->source;
-               dst_port = uh->dest;
-               l4type |= IXGBE_ATR_L4TYPE_UDP;
-               /* l4type IPv4 type is 0, no need to assign */
-       } else {
-               /* Unsupported L4 header, just bail here */
-               return;
-       }
-
-       memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
-
-       vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
-                  IXGBE_TX_FLAGS_VLAN_SHIFT;
-       src_ipv4_addr = iph->saddr;
-       dst_ipv4_addr = iph->daddr;
-       flex_bytes = eth->h_proto;
-
-       ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
-       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
-       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
-       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
-       /* src and dst are inverted, think how the receiver sees them */
-       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
-       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
-
-       /* This assumes the Rx queue and Tx queue are bound to the same CPU */
-       ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
-}
-
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        netif_stop_subqueue(netdev, tx_ring->queue_index);
+
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
@@ -6112,8 +4844,9 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #ifdef MAX_SKB_FRAGS
        unsigned int f;
 #endif
+
 #ifdef HAVE_TX_MQ
-       r_idx = skb->queue_mapping;
+       r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
 #endif
        tx_ring = &adapter->tx_ring[r_idx];
 
@@ -6136,25 +4869,26 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #endif
        }
 #endif
-       /* four things can cause us to need a context descriptor */
+       /* three things can cause us to need a context descriptor */
        if (skb_is_gso(skb) ||
            (skb->ip_summed == CHECKSUM_PARTIAL) ||
            (tx_flags & IXGBE_TX_FLAGS_VLAN))
                count++;
+
        count += TXD_USE_COUNT(skb_headlen(skb));
 #ifdef MAX_SKB_FRAGS
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
 #endif
+
        if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
                adapter->tx_busy++;
                return NETDEV_TX_BUSY;
        }
 
-       first = tx_ring->next_to_use;
        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IXGBE_TX_FLAGS_IPV4;
+       first = tx_ring->next_to_use;
        tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
@@ -6167,18 +4901,9 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-       /* add the ATR filter if ATR is on */
-       if (tx_ring->atr_sample_rate) {
-               ++tx_ring->atr_count;
-               if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-                   test_bit(__IXGBE_FDIR_INIT_DONE, &tx_ring->reinit_state)) {
-                       ixgbe_atr(adapter, skb, tx_ring->queue_index, tx_flags);
-                       tx_ring->atr_count = 0;
-               }
-       }
        ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-                      ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first),
-                      skb->len, hdr_len);
+                      ixgbe_tx_map(adapter, tx_ring, skb, first),
+                      skb->len, hdr_len);
 
        netdev->trans_start = jiffies;
 
@@ -6227,50 +4952,6 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
-#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
-/**
- * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
- * netdev->dev_addr_list
- * @netdev: network interface device structure
- *
- * Returns non-zero on failure
- **/
-static int ixgbe_add_sanmac_netdev(struct net_device *dev)
-{
-       int err = 0;
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-       struct ixgbe_mac_info *mac = &adapter->hw.mac;
-
-       if (is_valid_ether_addr(mac->san_addr)) {
-               rtnl_lock();
-               err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
-               rtnl_unlock();
-       }
-       return err;
-}
-
-/**
- * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
- * netdev->dev_addr_list
- * @netdev: network interface device structure
- *
- * Returns non-zero on failure
- **/
-static int ixgbe_del_sanmac_netdev(struct net_device *dev)
-{
-       int err = 0;
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-       struct ixgbe_mac_info *mac = &adapter->hw.mac;
-
-       if (is_valid_ether_addr(mac->san_addr)) {
-               rtnl_lock();
-               err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
-               rtnl_unlock();
-       }
-       return err;
-}
-
-#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */
 #ifdef ETHTOOL_OPS_COMPAT
 /**
  * ixgbe_ioctl -
@@ -6289,6 +4970,190 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 }
 
 #endif
+
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+int ixgbe_get_avail_queues(struct net_device *netdev, unsigned int queue_type)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       if (queue_type == VMQ_TYPE_RX)
+               return (adapter->num_rx_queues - adapter->rx_queues_allocated) - 1;
+       else if (queue_type == VMQ_TYPE_TX)
+               return 0;
+       else
+               return 0;
+}
+
+int ixgbe_get_vmq_maxsize(struct net_device *netdev)
+{
+       return IXGBE_MAX_TXD;
+}
+
+int ixgbe_alloc_vmq_queue(struct net_device *netdev, unsigned int queue_type)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       if (queue_type == VMQ_TYPE_TX)
+               return -EINVAL;
+
+       if (adapter->rx_queues_allocated >= adapter->num_rx_queues) {
+               return -EINVAL;
+       } else {
+               int i;
+               for (i = 1; i < adapter->num_rx_queues; i++) {
+                       if (!adapter->rx_ring[i].allocated) {
+                               adapter->rx_ring[i].allocated = TRUE;
+                               adapter->rx_queues_allocated++;
+                               return i;
+                       }
+               }
+               return -EINVAL;
+       }
+}
+
+int ixgbe_free_vmq_queue(struct net_device *netdev, int queue)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       if (queue >= adapter->num_rx_queues)
+               return -EINVAL;
+
+       if (!adapter->rx_ring[queue].allocated)
+               return -EINVAL;
+
+       adapter->rx_ring[queue].allocated = FALSE;
+       adapter->rx_queues_allocated--;
+       ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[queue]);
+
+       return 0;
+}
+
+int ixgbe_set_rxqueue_macfilter(struct net_device *netdev, int queue,
+                               u8 *mac_addr)
+{
+       int err = 0;
+       u32 rah;
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_ring *rx_ring = &adapter->rx_ring[queue];
+
+       if ((queue < 0) || (queue > adapter->num_rx_queues))
+               return -EADDRNOTAVAIL;
+
+       /* Note: Broadcast address is used to disable the MAC filter*/
+       if (!is_valid_ether_addr(mac_addr)) {
+
+               memset(rx_ring->mac_addr, 0xFF, ETH_ALEN);
+
+               /* Clear RAR */
+               IXGBE_WRITE_REG(hw, IXGBE_RAL(queue), 0);
+               IXGBE_WRITE_FLUSH(hw);
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(queue), 0);
+               IXGBE_WRITE_FLUSH(hw);
+
+               return -EADDRNOTAVAIL;
+       }
+
+       /* Store in ring */
+       memcpy(rx_ring->mac_addr, mac_addr, ETH_ALEN);
+
+       err = ixgbe_set_rar(&adapter->hw, queue, rx_ring->mac_addr, 1,
+                           IXGBE_RAH_AV);
+
+       if (!err) {
+               /* Set the VIND for the indicated queue's RAR Entry */
+               rah = IXGBE_READ_REG(hw, IXGBE_RAH(queue));
+               rah &= ~IXGBE_RAH_VIND_MASK;
+               rah |= (queue << IXGBE_RAH_VIND_SHIFT);
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(queue), rah);
+               IXGBE_WRITE_FLUSH(hw);
+       }
+
+       return err;
+}
+
+int ixgbe_get_vmq_size(struct net_device *netdev, int queue)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       if (queue >= adapter->num_rx_queues)
+               return -EINVAL;
+       return adapter->rx_ring[queue].count;
+}
+
+int ixgbe_set_vmq_size(struct net_device *netdev, int queue, int size)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       /* Not implemented yet, so just return count. */
+       return adapter->rx_ring[queue].count;
+}
+
+int ixgbe_set_vmq_vlan(struct net_device *netdev, int queue, int vlan_id)
+{
+       return 0;  /* not implemented */
+}
+
+int ixgbe_vmq_enable(struct net_device *netdev, int queue)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 rxdctl;
+
+       if (queue >= adapter->num_rx_queues)
+               return -EINVAL;
+
+       if (!adapter->rx_ring[queue].allocated)
+               return -EINVAL;
+       adapter->rx_ring[queue].active = 1;
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+       rxdctl |= IXGBE_RXDCTL_ENABLE;
+       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), rxdctl);
+       IXGBE_WRITE_FLUSH(hw);
+       ixgbe_alloc_rx_buffers(adapter,
+                              &adapter->rx_ring[queue],
+                              IXGBE_DESC_UNUSED(&adapter->rx_ring[queue]));
+       return 0;
+}
+int ixgbe_vmq_disable(struct net_device *netdev, int queue)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 rxdctl;
+
+       if (queue >= adapter->num_rx_queues)
+               return -EINVAL;
+
+       if (!adapter->rx_ring[queue].allocated)
+               return -EINVAL;
+
+       adapter->rx_ring[queue].active = 0;
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), rxdctl);
+       return 0;
+}
+
+static void ixgbe_setup_vmq(struct ixgbe_adapter *adapter)
+{
+       net_vmq_t *vmq;
+
+       vmq = alloc_vmq(adapter->num_rx_queues);
+       if (vmq) {
+               vmq->avail_queues = ixgbe_get_avail_queues;
+               vmq->alloc_queue = ixgbe_alloc_vmq_queue;
+               vmq->free_queue = ixgbe_free_vmq_queue;
+               vmq->get_maxsize = ixgbe_get_vmq_maxsize;
+               vmq->get_size = ixgbe_get_vmq_size;
+               vmq->set_size = ixgbe_set_vmq_size;
+               vmq->set_mac =  ixgbe_set_rxqueue_macfilter;
+               vmq->set_vlan = ixgbe_set_vmq_vlan;
+               vmq->enable = ixgbe_vmq_enable;
+               vmq->disable = ixgbe_vmq_disable;
+               vmq->nvmq = adapter->num_rx_queues;
+               adapter->netdev->vmq = vmq;
+       }
+}
+#endif /* CONFIG_XEN_NETDEV2_VMQ */
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -6304,88 +5169,39 @@ static void ixgbe_netpoll(struct net_device *netdev)
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        ixgbe_intr(adapter->pdev->irq, netdev);
        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
-       ixgbe_irq_enable(adapter, true, true);
+       ixgbe_irq_enable(adapter);
 }
 
 #endif
-#ifdef HAVE_NETDEV_SELECT_QUEUE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+
+/**
+ * ixgbe_link_config - set up initial link with default speed and duplex
+ * @hw: pointer to private hardware struct
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_link_config(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       u32 autoneg;
+       bool link_up = false;
+       u32 ret = IXGBE_ERR_LINK_SETUP;
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-               return smp_processor_id();
+       if (hw->mac.ops.check_link)
+               ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               return 0; /* all untagged traffic should default to TC 0 */
-
-       return skb_tx_hash(dev, skb);
-}
-
-#endif /* HAVE_NETDEV_SELECT_QUEUE */
-#ifdef HAVE_NET_DEVICE_OPS
-static const struct net_device_ops ixgbe_netdev_ops = {
-       .ndo_open               = &ixgbe_open,
-       .ndo_stop               = &ixgbe_close,
-       .ndo_start_xmit         = &ixgbe_xmit_frame,
-       .ndo_get_stats          = &ixgbe_get_stats,
-       .ndo_set_rx_mode        = &ixgbe_set_rx_mode,
-       .ndo_set_multicast_list = &ixgbe_set_rx_mode,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = &ixgbe_set_mac,
-       .ndo_change_mtu         = &ixgbe_change_mtu,
-#ifdef ETHTOOL_OPS_COMPAT
-       .ndo_do_ioctl           = &ixgbe_ioctl,
-#endif
-       .ndo_tx_timeout         = &ixgbe_tx_timeout,
-       .ndo_vlan_rx_register   = &ixgbe_vlan_rx_register,
-       .ndo_vlan_rx_add_vid    = &ixgbe_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid   = &ixgbe_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = &ixgbe_netpoll,
-#endif
-       .ndo_select_queue       = &ixgbe_select_queue,
-};
+       if (ret || !link_up)
+               goto link_cfg_out;
 
-#endif /* HAVE_NET_DEVICE_OPS */
+       if (hw->mac.ops.get_link_capabilities)
+               ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+                                                       &hw->mac.autoneg);
+       if (ret)
+               goto link_cfg_out;
 
-void ixgbe_assign_netdev_ops(struct net_device *dev)
-{
-       struct ixgbe_adapter *adapter;
-       adapter = netdev_priv(dev);
-#ifdef HAVE_NET_DEVICE_OPS
-       dev->netdev_ops = &ixgbe_netdev_ops;
-#else /* HAVE_NET_DEVICE_OPS */
-       dev->open = &ixgbe_open;
-       dev->stop = &ixgbe_close;
-       dev->hard_start_xmit = &ixgbe_xmit_frame;
-       dev->get_stats = &ixgbe_get_stats;
-#ifdef HAVE_SET_RX_MODE
-       dev->set_rx_mode = &ixgbe_set_rx_mode;
-#endif
-       dev->set_multicast_list = &ixgbe_set_rx_mode;
-       dev->set_mac_address = &ixgbe_set_mac;
-       dev->change_mtu = &ixgbe_change_mtu;
-#ifdef ETHTOOL_OPS_COMPAT
-       dev->do_ioctl = &ixgbe_ioctl;
-#endif
-#ifdef HAVE_TX_TIMEOUT
-       dev->tx_timeout = &ixgbe_tx_timeout;
-#endif
-#ifdef NETIF_F_HW_VLAN_TX
-       dev->vlan_rx_register = &ixgbe_vlan_rx_register;
-       dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid;
-       dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid;
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = &ixgbe_netpoll;
-#endif
-#ifdef HAVE_NETDEV_SELECT_QUEUE
-       dev->select_queue = &ixgbe_select_queue;
-#endif /* HAVE_NETDEV_SELECT_QUEUE */
-#endif /* HAVE_NET_DEVICE_OPS */
-       ixgbe_set_ethtool_ops(dev);
-       dev->watchdog_timeo = 5 * HZ;
+       if (hw->mac.ops.setup_link_speed)
+               ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+link_cfg_out:
+       return ret;
 }
 
 /**
@@ -6407,7 +5223,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        struct ixgbe_hw *hw = NULL;
        static int cards_found;
        int i, err, pci_using_dac;
-       u32 part_num;
 
        err = pci_enable_device(pdev);
        if (err)
@@ -6435,28 +5250,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_pci_reg;
        }
 
-       /*
-        * Workaround of Silicon errata on 82598. Disable LOs in the PCI switch
-        * port to which the 82598 is connected to prevent duplicate
-        * completions caused by LOs.  We need the mac type so that we only
-        * do this on 82598 devices, ixgbe_set_mac_type does this for us if
-        * we set it's device ID.
-        */
-       hw = vmalloc(sizeof(struct ixgbe_hw));
-       if (!hw) {
-               printk(KERN_INFO "Unable to allocate memory for LOs fix "
-                       "- not checked\n");
-       } else {
-               hw->vendor_id = pdev->vendor;
-               hw->device_id = pdev->device;
-               ixgbe_set_mac_type(hw);
-               if (hw->mac.type == ixgbe_mac_82598EB)
-                       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
-               vfree(hw);
-       }
-
-       pci_enable_pcie_error_reporting(pdev);
-
        pci_set_master(pdev);
 
 #ifdef HAVE_TX_MQ
@@ -6481,12 +5274,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
 #ifdef HAVE_PCI_ERS
-       /*
-        * call save state here in standalone driver because it relies on
-        * adapter struct to exist, and needs to call netdev_priv
-        */
        pci_save_state(pdev);
-
 #endif
        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
@@ -6500,8 +5288,32 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                        continue;
        }
 
-       ixgbe_assign_netdev_ops(netdev);
-
+       netdev->open = &ixgbe_open;
+       netdev->stop = &ixgbe_close;
+       netdev->hard_start_xmit = &ixgbe_xmit_frame;
+       netdev->get_stats = &ixgbe_get_stats;
+#ifdef HAVE_SET_RX_MODE
+       netdev->set_rx_mode = &ixgbe_set_rx_mode;
+#endif
+       netdev->set_multicast_list = &ixgbe_set_rx_mode;
+       netdev->set_mac_address = &ixgbe_set_mac;
+       netdev->change_mtu = &ixgbe_change_mtu;
+#ifdef ETHTOOL_OPS_COMPAT
+       netdev->do_ioctl = &ixgbe_ioctl;
+#endif
+       ixgbe_set_ethtool_ops(netdev);
+#ifdef HAVE_TX_TIMEOUT
+       netdev->tx_timeout = &ixgbe_tx_timeout;
+       netdev->watchdog_timeo = 5 * HZ;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+       netdev->vlan_rx_register = ixgbe_vlan_rx_register;
+       netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
+       netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       netdev->poll_controller = ixgbe_netpoll;
+#endif
        strcpy(netdev->name, pci_name(pdev));
 
        adapter->bd_number = cards_found;
@@ -6524,47 +5336,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
        INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
 
-       /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
-       INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
-
-       /* a new SFP+ module arrival, called from GPI SDP2 context */
-       INIT_WORK(&adapter->sfp_config_module_task,
-                 ixgbe_sfp_config_module_task);
-
        /* setup the private structure */
        err = ixgbe_sw_init(adapter);
        if (err)
                goto err_sw_init;
 
-       /*
-        * If we have a fan, this is as early we know, warn if we
-        * have had a failure.
-        */
-       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
-               u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-               if (esdp & IXGBE_ESDP_SDP1)
-                       DPRINTK(PROBE, CRIT,
-                               "Fan has stopped, replace the adapter\n");
-       }
-
        /* reset_hw fills in the perm_addr as well */
        err = hw->mac.ops.reset_hw(hw);
-       if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
-           hw->mac.type == ixgbe_mac_82598EB) {
-               /*
-                * Start a kernel thread to watch for a module to arrive.
-                * Only do this for 82598, since 82599 will generate interrupts
-                * on module arrival.
-                */
-               set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
-               mod_timer(&adapter->sfp_timer,
-                         round_jiffies(jiffies + (2 * HZ)));
-               err = 0;
-       } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "failed to load because an "
-                       "unsupported SFP+ module type was detected.\n");
-               goto err_sw_init;
-       } else if (err) {
+       if (err) {
                DPRINTK(PROBE, ERR, "HW Init failed: %d\n", err);
                goto err_sw_init;
        }
@@ -6595,34 +5374,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->features |= NETIF_F_TSO6;
 #endif /* NETIF_F_TSO6 */
 #endif /* NETIF_F_TSO */
-#ifdef NETIF_F_GRO
-       netdev->features |= NETIF_F_GRO;
-#endif /* NETIF_F_GRO */
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
-               adapter->flags &= ~(IXGBE_FLAG_FDIR_HASH_CAPABLE
-                                   | IXGBE_FLAG_FDIR_PERFECT_CAPABLE);
-#ifndef IXGBE_NO_HW_RSC
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-#ifdef NETIF_F_LRO
-               netdev->features |= NETIF_F_LRO;
-#endif
-#ifndef IXGBE_NO_LRO
-               adapter->flags2 &= ~IXGBE_FLAG2_SWLRO_ENABLED;
-#endif
-               adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-       } else {
-#endif
-#ifndef IXGBE_NO_LRO
-#ifdef NETIF_F_LRO
-               netdev->features |= NETIF_F_LRO;
-#endif
-               adapter->flags2 |= IXGBE_FLAG2_SWLRO_ENABLED;
-#endif
-#ifndef IXGBE_NO_HW_RSC
-               adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-       }
+#ifndef IXGBE_NO_INET_LRO
+       netdev->features |= NETIF_F_LRO;
+
 #endif
 #ifdef HAVE_NETDEV_VLAN_FEATURES
 #ifdef NETIF_F_TSO
@@ -6635,10 +5391,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->vlan_features |= NETIF_F_SG;
 
 #endif /* HAVE_NETDEV_VLAN_FEATURES */
-#ifdef CONFIG_DCB
-       netdev->dcbnl_ops = &dcbnl_ops;
-#endif
-
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
@@ -6668,6 +5420,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        }
 #endif
 
+       if (hw->mac.ops.get_bus_info)
+               hw->mac.ops.get_bus_info(hw);
+
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &ixgbe_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6679,61 +5434,45 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
-       switch (pdev->device) {
-       case IXGBE_DEV_ID_82599_KX4:
-               adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
-                               IXGBE_WUFC_MC | IXGBE_WUFC_BC);
-               /* Enable ACPI wakeup in GRC */
-               IXGBE_WRITE_REG(hw, IXGBE_GRC,
-                            (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
-               break;
-       default:
-               adapter->wol = 0;
-               break;
-       }
-       device_init_wakeup(&adapter->pdev->dev, true);
-       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
-       /* save off EEPROM version number */
-       ixgbe_read_eeprom(hw, 0x29, &adapter->eeprom_version);
-
        /* reset the hardware with the new settings */
-       err = hw->mac.ops.start_hw(hw);
-       if (err == IXGBE_ERR_EEPROM_VERSION) {
-               /* We are running on a pre-production device, log a warning */
-               DPRINTK(PROBE, INFO, "This device is a pre-production adapter/"
-                       "LOM.  Please be aware there may be issues associated "
-                       "with your hardware.  If you are experiencing problems "
-                       "please contact your Intel or hardware representative "
-                       "who provided you with this hardware.\n");
-       }
-       /* pick up the PCI bus settings for reporting later */
-       if (hw->mac.ops.get_bus_info)
-               hw->mac.ops.get_bus_info(hw);
+       hw->mac.ops.start_hw(hw);
 
+       /* link_config depends on ixgbe_start_hw being called at least once */
+       err = ixgbe_link_config(hw);
+       if (err) {
+               DPRINTK(PROBE, ERR, "setup_link_speed FAILED %d\n", err);
+               goto err_register;
+       }
 
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-               INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+#ifdef CONFIG_IXGBE_NAPI
+       ixgbe_napi_add_all(adapter);
 
+#endif
        strcpy(netdev->name, "eth%d");
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+               ixgbe_setup_vmq(adapter);
+#endif
        err = register_netdev(netdev);
        if (err)
                goto err_register;
 
-       adapter->netdev_registered = true;
+#ifndef CONFIG_XEN_NETDEV2_VMQ
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+               ixgbe_sysfs_create(adapter);
+#endif
+
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) {
-               err = dca_add_requester(&pdev->dev);
-               if (err == 0) {
+               if (dca_add_requester(&pdev->dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+                       /* always use CB2 mode, difference is masked
+                        * in the CB driver */
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
                        ixgbe_setup_dca(adapter);
-               } else {
-                       DPRINTK(PROBE, INFO, "DCA registration failed: %d\n",
-                               err);
                }
        }
 
@@ -6741,8 +5480,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        /* print all messages at the end so that we use our eth%d name */
        /* print bus type/speed/width info */
        DPRINTK(PROBE, INFO, "(PCI Express:%s:%s) ",
-               ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
-                (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
+               ((hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
                 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
                 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
                 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
@@ -6752,15 +5490,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
 
-       ixgbe_read_pba_num(hw, &part_num);
-       if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-               DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
-                       hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-                       (part_num >> 8), (part_num & 0xff));
+       if ((hw->phy.type == ixgbe_phy_nl) &&
+           (hw->phy.sfp_type != ixgbe_sfp_type_not_present))
+               DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, SFP+: %d\n",
+                       hw->mac.type, hw->phy.type, hw->phy.sfp_type);
        else
-               DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-                       hw->mac.type, hw->phy.type,
-                       (part_num >> 8), (part_num & 0xff));
+               DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d\n",
+                       hw->mac.type, hw->phy.type);
 
        if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
                DPRINTK(PROBE, WARNING, "PCI-Express bandwidth available for "
@@ -6770,37 +5506,26 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                        "PCI-Express slot is required.\n");
        }
 
+#ifndef IXGBE_NO_INET_LRO
+       DPRINTK(PROBE, INFO, "In-kernel LRO is enabled \n");
+#else
 #ifndef IXGBE_NO_LRO
-       if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED)
-               DPRINTK(PROBE, INFO, "Internal LRO is enabled \n");
-       else
-               DPRINTK(PROBE, INFO, "LRO is disabled \n");
+       DPRINTK(PROBE, INFO, "Internal LRO is enabled \n");
+#else
+       DPRINTK(PROBE, INFO, "LRO is disabled \n");
 #endif
-#ifndef IXGBE_NO_HW_RSC
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-               DPRINTK(PROBE, INFO, "HW RSC is enabled \n");
 #endif
-#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
-       /* add san mac addr to netdev */
-       ixgbe_add_sanmac_netdev(netdev);
-
-#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
        DPRINTK(PROBE, INFO, "Intel(R) 10 Gigabit Network Connection\n");
        cards_found++;
        return 0;
 
 err_register:
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-               cancel_work_sync(&adapter->fdir_reinit_task);
-       ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_release_hw_control(adapter);
 err_sw_init:
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->sfp_task);
-       cancel_work_sync(&adapter->multispeed_fiber_task);
-       cancel_work_sync(&adapter->sfp_config_module_task);
+       ixgbe_reset_interrupt_capability(adapter);
 #ifdef IXGBE_TCP_TIMER
        iounmap(adapter->msix_addr);
 err_map_msix:
@@ -6831,20 +5556,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        set_bit(__IXGBE_DOWN, &adapter->state);
-       /*
-        * clear the module not found bit to make sure the worker won't
-        * reschedule
-        */
+       /* clear the module not found bit to make sure the worker won't
+        * reschedule */
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->watchdog_task);
        cancel_work_sync(&adapter->sfp_task);
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-               cancel_work_sync(&adapter->fdir_reinit_task);
-       cancel_work_sync(&adapter->multispeed_fiber_task);
-       cancel_work_sync(&adapter->sfp_config_module_task);
        flush_scheduled_work();
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
@@ -6855,17 +5573,22 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        }
 
 #endif
-#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
-       /* remove the added san mac */
-       ixgbe_del_sanmac_netdev(netdev);
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       if (netdev->vmq) {
+               free_vmq(netdev->vmq);
+               netdev->vmq = 0;
+       }
+#endif
 
-#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
-       if (adapter->netdev_registered) {
+#ifndef CONFIG_XEN_NETDEV2_VMQ
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+               ixgbe_sysfs_remove(adapter);
+#endif
+       if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
-               adapter->netdev_registered = false;
-       }
 
-       ixgbe_clear_interrupt_scheme(adapter);
+       ixgbe_reset_interrupt_capability(adapter);
+
        ixgbe_release_hw_control(adapter);
 
 #ifdef IXGBE_TCP_TIMER
@@ -6875,9 +5598,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        pci_release_regions(pdev);
 
        DPRINTK(PROBE, INFO, "complete\n");
-       free_netdev(netdev);
+#ifdef CONFIG_IXGBE_NAPI
+       ixgbe_napi_del_all(adapter);
+#endif
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
 
-       pci_disable_pcie_error_reporting(pdev);
+       free_netdev(netdev);
 
        pci_disable_device(pdev);
 }
@@ -6891,13 +5618,6 @@ u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
        return value;
 }
 
-void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
-{
-       struct ixgbe_adapter *adapter = hw->back;
-
-       pci_write_config_word(adapter->pdev, reg, value);
-}
-
 #ifdef HAVE_PCI_ERS
 /**
  * ixgbe_io_error_detected - called when PCI error is detected
@@ -6919,7 +5639,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                ixgbe_down(adapter);
        pci_disable_device(pdev);
 
-       /* Request a slot reset. */
+       /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -6933,26 +5653,21 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       pci_ers_result_t result;
 
        if (pci_enable_device(pdev)) {
                DPRINTK(PROBE, ERR,
                        "Cannot re-enable PCI device after reset.\n");
-               result = PCI_ERS_RESULT_DISCONNECT;
-       } else {
-               pci_set_master(pdev);
-               pci_restore_state(pdev);
-
-               pci_wake_from_d3(pdev, false);
-
-               ixgbe_reset(adapter);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
-               result = PCI_ERS_RESULT_RECOVERED;
+               return PCI_ERS_RESULT_DISCONNECT;
        }
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
 
-       pci_cleanup_aer_uncorrect_error_status(pdev);
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
 
-       return result;
+       ixgbe_reset(adapter);
+
+       return PCI_ERS_RESULT_RECOVERED;
 }
 
 /**
@@ -7017,21 +5732,17 @@ bool ixgbe_is_ixgbe(struct pci_dev *pcidev)
  **/
 static int __init ixgbe_init_module(void)
 {
-       int ret;
        printk(KERN_INFO "ixgbe: %s - version %s\n", ixgbe_driver_string,
               ixgbe_driver_version);
 
        printk(KERN_INFO "%s\n", ixgbe_copyright);
 
-#ifndef CONFIG_DCB
        ixgbe_dcb_netlink_register();
-#endif
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        dca_register_notify(&dca_notifier);
 
 #endif
-       ret = pci_register_driver(&ixgbe_driver);
-       return ret;
+       return pci_register_driver(&ixgbe_driver);
 }
 
 module_init(ixgbe_init_module);
@@ -7047,9 +5758,7 @@ static void __exit ixgbe_exit_module(void)
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        dca_unregister_notify(&dca_notifier);
 #endif
-#ifndef CONFIG_DCB
        ixgbe_dcb_netlink_unregister();
-#endif
        pci_unregister_driver(&ixgbe_driver);
 }
 
@@ -7068,4 +5777,3 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
-
diff --git a/drivers/net/ixgbe/ixgbe_osdep.h b/drivers/net/ixgbe/ixgbe_osdep.h
index eafde20..50da4d4 100644
--- a/drivers/net/ixgbe/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/ixgbe_osdep.h
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,7 @@
 #undef ASSERT
 
 #ifdef DBG
-#define hw_dbg(hw, S, A...)    printk(KERN_DEBUG S, ## A)
+#define hw_dbg(hw, S, A...)    printk(KERN_DEBUG S, A)
 #else
 #define hw_dbg(hw, S, A...)      do {} while (0)
 #endif
@@ -87,21 +87,10 @@
 #define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
     readl((a)->hw_addr + (reg) + ((offset) << 2)))
 
-#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
-       writel((u32) (val >> 32), (addr + 4));
-#endif
-
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 struct ixgbe_hw;
 extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
-extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
 #define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
-#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word
 #define IXGBE_EEPROM_GRANT_ATTEMPS 100
-#define IXGBE_HTONL(_i) htonl(_i)
-#define IXGBE_HTONS(_i) htons(_i)
 
 #endif /* _IXGBE_OSDEP_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_param.c b/drivers/net/ixgbe/ixgbe_param.c
index d6ace0c..ba97102 100644
--- a/drivers/net/ixgbe/ixgbe_param.c
+++ b/drivers/net/ixgbe/ixgbe_param.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -96,31 +96,16 @@ IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), de
 IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1");
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-/* DCA - Direct Cache Access (DCA) Control
+/* DCA - Direct Cache Access (DCA) Enable/Disable
  *
- * This option allows the device to hint to DCA enabled processors
- * which CPU should have its cache warmed with the data being
- * transferred over PCIe.  This can increase performance by reducing
- * cache misses.  ixgbe hardware supports DCA for:
- * tx descriptor writeback
- * rx descriptor writeback
- * rx data
- * rx data header only (in packet split mode)
- *
- * enabling option 2 can cause cache thrash in some tests, particularly
- * if the CPU is completely utilized
- *
- * Valid Range: 0 - 2
+ * Valid Range: 0, 1
  *  - 0 - disables DCA
  *  - 1 - enables DCA
- *  - 2 - enables DCA with rx data included
  *
- * Default Value: 2
+ * Default Value: 1
  */
 
-#define IXGBE_MAX_DCA 2
-
-IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, 1=descriptor only, 2=descriptor and data");
+IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, default 1");
 
 #endif
 /* RSS - Receive-Side Scaling (RSS) Descriptor Queues
@@ -148,14 +133,14 @@ IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable (defau
 
 /* Interrupt Throttle Rate (interrupts/sec)
  *
- * Valid Range: 956-488281 (0=off, 1=dynamic)
+ * Valid Range: 100-500000 (0=off)
  *
  * Default Value: 8000
  */
 #define DEFAULT_ITR                 8000
-IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, (956-488281), default 8000");
-#define MAX_ITR       IXGBE_MAX_INT_RATE
-#define MIN_ITR       IXGBE_MIN_INT_RATE
+IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, (100-500000), default 8000");
+#define MAX_ITR                   500000
+#define MIN_ITR                      100
 
 #ifndef IXGBE_NO_LLI
 /* LLIPort (Low Latency Interrupt TCP Port)
@@ -193,32 +178,22 @@ IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)");
 #define DEFAULT_LLISIZE                0
 #define MAX_LLISIZE                 1500
 #define MIN_LLISIZE                    0
+#endif /* IXGBE_NO_LLI */
 
-/* LLIEType (Low Latency Interrupt Ethernet Type)
- *
- * Valid Range: 0 - 0x8fff
- *
- * Default Value: 0 (disabled)
- */
-IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type");
-
-#define DEFAULT_LLIETYPE               0
-#define MAX_LLIETYPE              0x8fff
-#define MIN_LLIETYPE                   0
-
-/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold)
+#ifndef IXGBE_NO_INET_LRO
+/* LROAggr (Large Receive Offload)
  *
- * Valid Range: 0 - 7
+ * Valid Range: 2 - 44
  *
- * Default Value: 0 (disabled)
+ * Default Value:  32
  */
-IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold");
+IXGBE_PARAM(LROAggr, "LRO - Maximum packets to aggregate");
 
-#define DEFAULT_LLIVLANP               0
-#define MAX_LLIVLANP                   7
-#define MIN_LLIVLANP                   0
+#define DEFAULT_LRO_AGGR              32
+#define MAX_LRO_AGGR                  44
+#define MIN_LRO_AGGR                   2
 
-#endif /* IXGBE_NO_LLI */
+#endif
 /* Rx buffer mode
  *
  * Valid Range: 0-2 0 = 1buf_mode_always, 1 = ps_mode_always and 2 = optimal
@@ -234,51 +209,7 @@ IXGBE_PARAM(RxBufferMode, "0=1 descriptor per packet,\n"
 #define IXGBE_RXBUFMODE_OPTIMAL                                2
 #define IXGBE_DEFAULT_RXBUFMODE          IXGBE_RXBUFMODE_OPTIMAL
 
-/* Flow Director filtering mode
- *
- * Valid Range: 0-2  0 = off, 1 = Hashing (ATR), and 2 = perfect filters
- *
- * Default Value: 1 (ATR)
- */
-IXGBE_PARAM(FdirMode, "Flow Director filtering modes:\n"
-                     "\t\t\t0 = Filtering off\n"
-                     "\t\t\t1 = Signature Hashing filters (SW ATR)\n"
-                     "\t\t\t2 = Perfect Filters");
-
-#define IXGBE_FDIR_FILTER_OFF                          0
-#define IXGBE_FDIR_FILTER_HASH                         1
-#define IXGBE_FDIR_FILTER_PERFECT                      2
-#define IXGBE_DEFAULT_FDIR_FILTER  IXGBE_FDIR_FILTER_HASH
-
-/* Flow Director packet buffer allocation level
- *
- * Valid Range: 0-2  0 = 8k hash/2k perfect, 1 = 16k hash/4k perfect,
- *                   2 = 32k hash/8k perfect
- *
- * Default Value: 0
- */
-IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n"
-                        "\t\t\t0 = 8k hash filters or 2k perfect filters\n"
-                        "\t\t\t1 = 16k hash filters or 4k perfect filters\n"
-                        "\t\t\t2 = 32k hash filters or 8k perfect filters");
 
-#define IXGBE_FDIR_PBALLOC_64K                         0
-#define IXGBE_FDIR_PBALLOC_128K                                1
-#define IXGBE_FDIR_PBALLOC_256K                                2
-#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K
-
-/* Software ATR packet sample rate
- *
- * Valid Range: 0-100  0 = off, 1-100 = rate of Tx packet inspection
- *
- * Default Value: 20
- */
-IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate");
-
-#define IXGBE_MAX_ATR_SAMPLE_RATE      100
-#define IXGBE_MIN_ATR_SAMPLE_RATE        1
-#define IXGBE_ATR_SAMPLE_RATE_OFF        0
-#define IXGBE_DEFAULT_ATR_SAMPLE_RATE   20
 
 struct ixgbe_option {
        enum { enable_option, range_option, list_option } type;
@@ -292,7 +223,7 @@ struct ixgbe_option {
                } r;
                struct { /* list_option info */
                        int nr;
-                       const struct ixgbe_opt_list {
+                       struct ixgbe_opt_list {
                                int i;
                                char *str;
                        } *p;
@@ -327,7 +258,7 @@ static int __devinit ixgbe_validate_option(unsigned int *value,
                break;
        case list_option: {
                int i;
-               const struct ixgbe_opt_list *ent;
+               struct ixgbe_opt_list *ent;
 
                for (i = 0; i < opt->arg.l.nr; i++) {
                        ent = &opt->arg.l.p[i];
@@ -363,8 +294,6 @@ static int __devinit ixgbe_validate_option(unsigned int *value,
 void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
 {
        int bd = adapter->bd_number;
-       u32 *aflags = &adapter->flags;
-       struct ixgbe_ring_feature *feature = adapter->ring_feature;
 
        if (bd >= IXGBE_MAX_NIC) {
                printk(KERN_NOTICE
@@ -394,32 +323,32 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                        ixgbe_validate_option(&i_type, &opt);
                        switch (i_type) {
                        case IXGBE_INT_MSIX:
-                               if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE))
+                               if (!adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)
                                        printk(KERN_INFO
                                               "Ignoring MSI-X setting; "
-                                              "support unavailable\n");
+                                              "support unavailable.\n");
                                break;
                        case IXGBE_INT_MSI:
-                               if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) {
+                               if (!adapter->flags & IXGBE_FLAG_MSI_CAPABLE) {
                                        printk(KERN_INFO
                                               "Ignoring MSI setting; "
-                                              "support unavailable\n");
+                                              "support unavailable.\n");
                                } else {
-                                       *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
-                                       *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
+                                       adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+                                       adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
                                }
                                break;
                        case IXGBE_INT_LEGACY:
                        default:
-                               *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
-                               *aflags &= ~IXGBE_FLAG_MSI_CAPABLE;
-                               *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_MSI_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
                                break;
                        }
 #ifdef module_param_array
                } else {
-                       *aflags |= IXGBE_FLAG_MSIX_CAPABLE;
-                       *aflags |= IXGBE_FLAG_MSI_CAPABLE;
+                       adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
+                       adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
                }
 #endif
        }
@@ -437,35 +366,33 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                        unsigned int mq = MQ[bd];
                        ixgbe_validate_option(&mq, &opt);
                        if (mq)
-                               *aflags |= IXGBE_FLAG_MQ_CAPABLE;
+                               adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
                        else
-                               *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
 #ifdef module_param_array
                } else {
                        if (opt.def == OPTION_ENABLED)
-                               *aflags |= IXGBE_FLAG_MQ_CAPABLE;
+                               adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
                        else
-                               *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
                }
 #endif
                /* Check Interoperability */
-               if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) &&
-                   !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) {
+               if ((adapter->flags & IXGBE_FLAG_MQ_CAPABLE) &&
+                   !(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) {
                        DPRINTK(PROBE, INFO,
                                "Multiple queues are not supported while MSI-X "
                                "is disabled.  Disabling Multiple Queues.\n");
-                       *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
+                       adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
                }
        }
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        { /* Direct Cache Access (DCA) */
                static struct ixgbe_option opt = {
-                       .type = range_option,
+                       .type = enable_option,
                        .name = "Direct Cache Access (DCA)",
                        .err  = "defaulting to Enabled",
-                       .def  = IXGBE_MAX_DCA,
-                       .arg  = { .r = { .min = OPTION_DISABLED,
-                                        .max = IXGBE_MAX_DCA}}
+                       .def  = OPTION_ENABLED
                };
                unsigned int dca = opt.def;
 
@@ -475,29 +402,21 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                        dca = DCA[bd];
                        ixgbe_validate_option(&dca, &opt);
                        if (!dca)
-                               *aflags &= ~IXGBE_FLAG_DCA_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 
                        /* Check Interoperability */
-                       if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) {
+                       if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) {
                                DPRINTK(PROBE, INFO, "DCA is disabled\n");
-                               *aflags &= ~IXGBE_FLAG_DCA_ENABLED;
-                       }
-
-                       if (dca == IXGBE_MAX_DCA) {
-                               DPRINTK(PROBE, INFO,
-                                       "DCA enabled for rx data\n");
-                               adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA;
+                               adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        }
 #ifdef module_param_array
                } else {
                        /* make sure to clear the capability flag if the
                         * option is disabled by default above */
                        if (opt.def == OPTION_DISABLED)
-                               *aflags &= ~IXGBE_FLAG_DCA_CAPABLE;
+                               adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
                }
 #endif
-               if (dca == IXGBE_MAX_DCA)
-                       adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA;
        }
 #endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
        { /* Receive-Side Scaling (RSS) */
@@ -527,42 +446,39 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                                ixgbe_validate_option(&rss, &opt);
                                break;
                        }
-                       feature[RING_F_RSS].indices = rss;
+                       adapter->ring_feature[RING_F_RSS].indices = rss;
                        if (rss)
-                               *aflags |= IXGBE_FLAG_RSS_ENABLED;
+                               adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
                        else
-                               *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
+                               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 #ifdef module_param_array
                } else {
                        if (opt.def == OPTION_DISABLED) {
-                               *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
+                               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
                        } else {
                                rss = min(IXGBE_MAX_RSS_INDICES,
                                          (int)num_online_cpus());
-                               feature[RING_F_RSS].indices = rss;
-                               if (rss)
-                                       *aflags |= IXGBE_FLAG_RSS_ENABLED;
-                               else
-                                       *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
+                               adapter->ring_feature[RING_F_RSS].indices = rss;
+                               adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
                        }
                }
 #endif
                /* Check Interoperability */
-               if (*aflags & IXGBE_FLAG_RSS_ENABLED) {
-                       if (!(*aflags & IXGBE_FLAG_RSS_CAPABLE)) {
+               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+                       if (!(adapter->flags & IXGBE_FLAG_RSS_CAPABLE)) {
                                DPRINTK(PROBE, INFO,
                                        "RSS is not supported on this "
                                        "hardware.  Disabling RSS.\n");
-                               *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
-                               feature[RING_F_RSS].indices = 0;
-                       } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
+                               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+                               adapter->ring_feature[RING_F_RSS].indices = 0;
+                       } else if (!(adapter->flags & IXGBE_FLAG_MQ_CAPABLE)) {
                                DPRINTK(PROBE, INFO,
                                        "RSS is not supported while multiple "
                                        "queues are disabled.  "
                                        "Disabling RSS.\n");
-                               *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
-                               *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
-                               feature[RING_F_RSS].indices = 0;
+                               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+                               adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+                               adapter->ring_feature[RING_F_RSS].indices = 0;
                        }
                }
        }
@@ -581,51 +497,42 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
 #endif
                        unsigned int vmdq = VMDQ[bd];
                        ixgbe_validate_option(&vmdq, &opt);
-                       feature[RING_F_VMDQ].indices = vmdq;
-                       adapter->flags2 |= IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE;
+                       adapter->ring_feature[RING_F_VMDQ].indices = vmdq;
                        /* zero or one both mean disabled from our driver's
                         * perspective */
                        if (vmdq > 1)
-                               *aflags |= IXGBE_FLAG_VMDQ_ENABLED;
+                               adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
                        else
-                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+                               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 #ifdef module_param_array
                } else {
                        if (opt.def == OPTION_DISABLED) {
-                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+                               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
                        } else {
-                               feature[RING_F_VMDQ].indices = 8;
-                               *aflags |= IXGBE_FLAG_VMDQ_ENABLED;
+                               adapter->ring_feature[RING_F_VMDQ].indices = 8;
+                               adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
                        }
                }
 #endif
                /* Check Interoperability */
-               if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) {
-                       if (!(*aflags & IXGBE_FLAG_VMDQ_CAPABLE)) {
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+                       if (!(adapter->flags & IXGBE_FLAG_VMDQ_CAPABLE)) {
                                DPRINTK(PROBE, INFO,
                                        "VMDQ is not supported on this "
                                        "hardware.  Disabling VMDQ.\n");
-                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-                               feature[RING_F_VMDQ].indices = 0;
-                       } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
+                               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+                               adapter->ring_feature[RING_F_VMDQ].indices = 0;
+                       } else if (!(adapter->flags & IXGBE_FLAG_MQ_CAPABLE)) {
                                DPRINTK(PROBE, INFO,
                                        "VMDQ is not supported while multiple "
                                        "queues are disabled.  "
                                        "Disabling VMDQ.\n");
-                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-                               feature[RING_F_VMDQ].indices = 0;
-                       }
-                       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                               /* for now, disable RSS when using VMDQ mode */
-                               *aflags &= ~IXGBE_FLAG_RSS_CAPABLE;
-                               *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
-                       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                               if (feature[RING_F_RSS].indices > 2
-                                   && feature[RING_F_VMDQ].indices > 32)
-                                       feature[RING_F_RSS].indices = 2;
-                               else if (feature[RING_F_RSS].indices != 0)
-                                       feature[RING_F_RSS].indices = 4;
+                               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+                               adapter->ring_feature[RING_F_VMDQ].indices = 0;
                        }
+                       /* for now, disable RSS when using VMDQ mode */
+                       adapter->flags &= ~IXGBE_FLAG_RSS_CAPABLE;
+                       adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
                }
        }
        { /* Interrupt Throttling Rate */
@@ -637,42 +544,38 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                        .arg  = { .r = { .min = MIN_ITR,
                                         .max = MAX_ITR }}
                };
+               u32 eitr;
 
 #ifdef module_param_array
                if (num_InterruptThrottleRate > bd) {
 #endif
-                       u32 eitr = InterruptThrottleRate[bd];
+                       eitr = InterruptThrottleRate[bd];
                        switch (eitr) {
                        case 0:
                                DPRINTK(PROBE, INFO, "%s turned off\n",
                                        opt.name);
-                               /*
-                                * zero is a special value, we don't want to
+                               /* zero is a special value, we don't want to
                                 * turn off ITR completely, just set it to an
-                                * insane interrupt rate
-                                */
-                               adapter->eitr_param = IXGBE_MAX_INT_RATE;
-                               adapter->itr_setting = 0;
+                                * insane interrupt rate (like 3.5 Million
+                                * ints/s */
+                               eitr = EITR_REG_TO_INTS_PER_SEC(1);
                                break;
                        case 1:
                                DPRINTK(PROBE, INFO, "dynamic interrupt "
                                         "throttling enabled\n");
-                               adapter->eitr_param = 20000;
                                adapter->itr_setting = 1;
+                               eitr = DEFAULT_ITR;
                                break;
                        default:
                                ixgbe_validate_option(&eitr, &opt);
-                               adapter->eitr_param = eitr;
-                               /* the first bit is used as control */
-                               adapter->itr_setting = eitr & ~1;
                                break;
                        }
 #ifdef module_param_array
                } else {
-                       adapter->eitr_param = DEFAULT_ITR;
-                       adapter->itr_setting = DEFAULT_ITR;
+                       eitr = DEFAULT_ITR;
                }
 #endif
+               adapter->eitr_param = eitr;
        }
 #ifndef IXGBE_NO_LLI
        { /* Low Latency Interrupt TCP Port*/
@@ -743,73 +646,47 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                        unsigned int lli_push = LLIPush[bd];
                        ixgbe_validate_option(&lli_push, &opt);
                        if (lli_push)
-                               *aflags |= IXGBE_FLAG_LLI_PUSH;
+                               adapter->flags |= IXGBE_FLAG_LLI_PUSH;
                        else
-                               *aflags &= ~IXGBE_FLAG_LLI_PUSH;
+                               adapter->flags &= ~IXGBE_FLAG_LLI_PUSH;
 #ifdef module_param_array
                } else {
                        if (opt.def == OPTION_ENABLED)
-                               *aflags |= IXGBE_FLAG_LLI_PUSH;
+                               adapter->flags |= IXGBE_FLAG_LLI_PUSH;
                        else
-                               *aflags &= ~IXGBE_FLAG_LLI_PUSH;
+                               adapter->flags &= ~IXGBE_FLAG_LLI_PUSH;
                }
 #endif
        }
-       { /* Low Latency Interrupt EtherType*/
-               static struct ixgbe_option opt = {
-                       .type = range_option,
-                       .name = "Low Latency Interrupt on Ethernet Protocol Type",
-                       .err  = "using default of "
-                                       __MODULE_STRING(DEFAULT_LLIETYPE),
-                       .def  = DEFAULT_LLIETYPE,
-                       .arg  = { .r = { .min = MIN_LLIETYPE,
-                                        .max = MAX_LLIETYPE }}
-               };
-
-#ifdef module_param_array
-               if (num_LLIEType > bd) {
-#endif
-                       adapter->lli_etype = LLIEType[bd];
-                       if (adapter->lli_etype) {
-                               ixgbe_validate_option(&adapter->lli_etype, &opt);
-                       } else {
-                               DPRINTK(PROBE, INFO, "%s turned off\n",
-                                       opt.name);
-                       }
-#ifdef module_param_array
-               } else {
-                       adapter->lli_etype = opt.def;
-               }
-#endif
-       }
-       { /* LLI VLAN Priority */
+#endif /* IXGBE_NO_LLI */
+#ifndef IXGBE_NO_INET_LRO
+       { /* Large Receive Offload - Maximum packets to aggregate */
                static struct ixgbe_option opt = {
                        .type = range_option,
-                       .name = "Low Latency Interrupt on VLAN priority threashold",
-                       .err  = "using default of "
-                                       __MODULE_STRING(DEFAULT_LLIVLANP),
-                       .def  = DEFAULT_LLIVLANP,
-                       .arg  = { .r = { .min = MIN_LLIVLANP,
-                                        .max = MAX_LLIVLANP }}
+                       .name = "LRO - Maximum packets to aggregate",
+                       .err  = "using default of " __MODULE_STRING(DEFAULT_LRO_AGGR),
+                       .def  = DEFAULT_LRO_AGGR,
+                       .arg  = { .r = { .min = MIN_LRO_AGGR,
+                                        .max = MAX_LRO_AGGR }}
                };
 
 #ifdef module_param_array
-               if (num_LLIVLANP > bd) {
+               if (num_LROAggr > bd) {
 #endif
-                       adapter->lli_vlan_pri = LLIVLANP[bd];
-                       if (adapter->lli_vlan_pri) {
-                               ixgbe_validate_option(&adapter->lli_vlan_pri, &opt);
+                       adapter->lro_max_aggr = LROAggr[bd];
+                       if (adapter->lro_max_aggr) {
+                               ixgbe_validate_option(&adapter->lro_max_aggr, &opt);
                        } else {
                                DPRINTK(PROBE, INFO, "%s turned off\n",
                                        opt.name);
                        }
 #ifdef module_param_array
                } else {
-                       adapter->lli_vlan_pri = opt.def;
+                       adapter->lro_max_aggr = opt.def;
                }
 #endif
        }
-#endif /* IXGBE_NO_LLI */
+#endif /* IXGBE_NO_INET_LRO */
        { /* Rx buffer mode */
                unsigned int rx_buf_mode;
                static struct ixgbe_option opt = {
@@ -829,202 +706,31 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                        ixgbe_validate_option(&rx_buf_mode, &opt);
                        switch (rx_buf_mode) {
                        case IXGBE_RXBUFMODE_OPTIMAL:
-                               *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-                               *aflags |= IXGBE_FLAG_RX_PS_CAPABLE;
+                               adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+                               adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
                                break;
                        case IXGBE_RXBUFMODE_PS_ALWAYS:
-                               *aflags |= IXGBE_FLAG_RX_PS_CAPABLE;
+                               adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
                                break;
                        case IXGBE_RXBUFMODE_1BUF_ALWAYS:
-                               *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+                               adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
                        default:
                                break;
                        }
 #ifdef module_param_array
                } else {
-                       *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-                       *aflags |= IXGBE_FLAG_RX_PS_CAPABLE;
+                       adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+                       adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
                }
 #endif
+#ifdef CONFIG_XEN_NETDEV2_VMQ
+       if ((adapter->flags &
+             (IXGBE_FLAG_RX_PS_CAPABLE | IXGBE_FLAG_VMDQ_ENABLED)) ==
+             (IXGBE_FLAG_RX_PS_CAPABLE | IXGBE_FLAG_VMDQ_ENABLED)) {
+               printk(KERN_INFO "ixgbe: packet split disabled for Xen VMDQ\n");
+               adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
        }
-       { /* Flow Director filtering mode */
-               unsigned int fdir_filter_mode;
-               static struct ixgbe_option opt = {
-                       .type = range_option,
-                       .name = "Flow Director filtering mode",
-                       .err = "using default of "
-                               __MODULE_STRING(IXGBE_DEFAULT_FDIR_FILTER),
-                       .def = IXGBE_DEFAULT_FDIR_FILTER,
-                       .arg = {.r = {.min = IXGBE_FDIR_FILTER_OFF,
-                                     .max = IXGBE_FDIR_FILTER_PERFECT}}
-               };
-
-               *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-                       goto no_flow_director;
-#ifdef module_param_array
-               if (num_FdirMode > bd) {
 #endif
-#ifdef HAVE_TX_MQ
-                       fdir_filter_mode = FdirMode[bd];
-#else
-                       fdir_filter_mode = IXGBE_FDIR_FILTER_OFF;
-#endif /* HAVE_TX_MQ */
-                       ixgbe_validate_option(&fdir_filter_mode, &opt);
-
-                       switch (fdir_filter_mode) {
-                       case IXGBE_FDIR_FILTER_OFF:
-                               DPRINTK(PROBE, INFO, "Flow Director disabled\n");
-                               break;
-                       case IXGBE_FDIR_FILTER_HASH:
-                               *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                               *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-                               feature[RING_F_FDIR].indices =
-                                       IXGBE_MAX_FDIR_INDICES;
-                               DPRINTK(PROBE, INFO,
-                                       "Flow Director hash filtering enabled\n");
-                               break;
-                       case IXGBE_FDIR_FILTER_PERFECT:
-                               *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                               *aflags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-                               feature[RING_F_FDIR].indices =
-                                       IXGBE_MAX_FDIR_INDICES;
-                               spin_lock_init(&adapter->fdir_perfect_lock);
-                               DPRINTK(PROBE, INFO,
-                                       "Flow Director perfect filtering enabled\n");
-                               break;
-                       default:
-                               break;
-                       }
-#ifdef module_param_array
-               } else {
-#ifdef HAVE_TX_MQ
-                       *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                       feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES;
-                       DPRINTK(PROBE, INFO,
-                               "Flow Director hash filtering enabled\n");
-#else
-                       *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                       *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-                       feature[RING_F_FDIR].indices = 0;
-                       DPRINTK(PROBE, INFO,
-                               "Flow Director hash filtering disabled\n");
-#endif /* HAVE_TX_MQ */
-               }
-               /* Check interoperability */
-               if ((*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                   (*aflags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
-                       if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
-                               DPRINTK(PROBE, INFO,
-                                       "Flow Director is not supported "
-                                       "while multiple queues are disabled. "
-                                       "Disabling Flow Director\n");
-                               *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                               *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-                       }
-               }
-#endif
-no_flow_director:
-               /* empty code line with semi-colon */ ;
-       }
-       { /* Flow Director packet buffer allocation */
-               unsigned int fdir_pballoc_mode;
-               static struct ixgbe_option opt = {
-                       .type = range_option,
-                       .name = "Flow Director packet buffer allocation",
-                       .err = "using default of "
-                               __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC),
-                       .def = IXGBE_DEFAULT_FDIR_PBALLOC,
-                       .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K,
-                                     .max = IXGBE_FDIR_PBALLOC_256K}}
-               };
-
-               if ((adapter->hw.mac.type == ixgbe_mac_82598EB) ||
-                   (!(*aflags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
-                                 IXGBE_FLAG_FDIR_PERFECT_CAPABLE))))
-                       goto no_fdir_pballoc;
-#ifdef module_param_array
-               if (num_FdirPballoc > bd) {
-#endif
-                       char pstring[10];
-                       fdir_pballoc_mode = FdirPballoc[bd];
-                       ixgbe_validate_option(&fdir_pballoc_mode, &opt);
-                       switch (fdir_pballoc_mode) {
-                       case IXGBE_FDIR_PBALLOC_64K:
-                               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
-                               sprintf(pstring, "64kB");
-                               break;
-                       case IXGBE_FDIR_PBALLOC_128K:
-                               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K;
-                               sprintf(pstring, "128kB");
-                               break;
-                       case IXGBE_FDIR_PBALLOC_256K:
-                               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K;
-                               sprintf(pstring, "256kB");
-                               break;
-                       default:
-                               break;
-                       }
-                       DPRINTK(PROBE, INFO,
-                               "Flow Director allocated %s of packet buffer\n",
-                               pstring);
-
-#ifdef module_param_array
-               } else {
-                       adapter->fdir_pballoc = opt.def;
-                       DPRINTK(PROBE, INFO,
-                            "Flow Director allocated 64kB of packet buffer\n");
-
-               }
-#endif
-no_fdir_pballoc:
-               /* empty code line with semi-colon */ ;
-       }
-       { /* Flow Director ATR Tx sample packet rate */
-               static struct ixgbe_option opt = {
-                       .type = range_option,
-                       .name = "Software ATR Tx packet sample rate",
-                       .err = "using default of "
-                               __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE),
-                       .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE,
-                       .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF,
-                                     .max = IXGBE_MAX_ATR_SAMPLE_RATE}}
-               };
-               static const char atr_string[] =
-                                           "ATR Tx Packet sample rate set to";
-
-               adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF;
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-                       goto no_fdir_sample;
-
-               /* no sample rate for perfect filtering */
-               if (*aflags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
-                       goto no_fdir_sample;
-#ifdef module_param_array
-               if (num_AtrSampleRate > bd) {
-#endif
-                       /* Only enable the sample rate if hashing (ATR) is on */
-                       if (*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                               adapter->atr_sample_rate = AtrSampleRate[bd];
-
-                       if (adapter->atr_sample_rate) {
-                               ixgbe_validate_option(&adapter->atr_sample_rate,
-                                                     &opt);
-                               DPRINTK(PROBE, INFO, "%s %d\n", atr_string,
-                                       adapter->atr_sample_rate);
-                       }
-#ifdef module_param_array
-               } else {
-                       /* Only enable the sample rate if hashing (ATR) is on */
-                       if (*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                               adapter->atr_sample_rate = opt.def;
-
-                       DPRINTK(PROBE, INFO, "%s default of %d\n", atr_string,
-                               adapter->atr_sample_rate);
-               }
-#endif
-no_fdir_sample:
-               /* empty code line with semi-colon */ ;
        }
 }
+
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 530d858..a8f6af2 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,19 +29,6 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-static void ixgbe_i2c_start(struct ixgbe_hw *hw);
-static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
-static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
-static bool ixgbe_get_i2c_data(u32 *i2cctl);
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
-
 /**
  *  ixgbe_init_phy_ops_generic - Inits PHY function ptrs
  *  @hw: pointer to the hardware structure
@@ -61,11 +48,6 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
        phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
        phy->ops.check_link = NULL;
        phy->ops.get_firmware_version = NULL;
-       phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
-       phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
-       phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
-       phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
-       phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
        phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
        phy->sfp_type = ixgbe_sfp_type_unknown;
 
@@ -82,7 +64,6 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u32 phy_addr;
-       u16 ext_ability = 0;
 
        if (hw->phy.type == ixgbe_phy_unknown) {
                for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -91,29 +72,10 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
                                ixgbe_get_phy_id(hw);
                                hw->phy.type =
                                        ixgbe_get_phy_type_from_id(hw->phy.id);
-
-                               if (hw->phy.type == ixgbe_phy_unknown) {
-                                       hw->phy.ops.read_reg(hw,
-                                                 IXGBE_MDIO_PHY_EXT_ABILITY,
-                                                 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-                                                 &ext_ability);
-                                       if (ext_ability &
-                                           IXGBE_MDIO_PHY_10GBASET_ABILITY ||
-                                           ext_ability &
-                                           IXGBE_MDIO_PHY_1000BASET_ABILITY)
-                                               hw->phy.type =
-                                                        ixgbe_phy_cu_unknown;
-                                       else
-                                               hw->phy.type =
-                                                        ixgbe_phy_generic;
-                               }
-
                                status = 0;
                                break;
                        }
                }
-               if (status != 0)
-                       hw->phy.addr = 0;
        } else {
                status = 0;
        }
@@ -180,9 +142,6 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
        case TN1010_PHY_ID:
                phy_type = ixgbe_phy_tn;
                break;
-       case AQ1002_PHY_ID:
-               phy_type = ixgbe_phy_aq;
-               break;
        case QT2022_PHY_ID:
                phy_type = ixgbe_phy_qt;
                break;
@@ -204,40 +163,13 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
  **/
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u16 ctrl = 0;
-       s32 status = 0;
-
-       if (hw->phy.type == ixgbe_phy_unknown)
-               status = ixgbe_identify_phy_generic(hw);
-
-       if (status != 0 || hw->phy.type == ixgbe_phy_none)
-               goto out;
-
        /*
         * Perform soft PHY reset to the PHY_XS.
         * This will cause a soft reset to the PHY
         */
-       hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
-                             IXGBE_MDIO_PHY_XS_RESET);
-
-       /* Poll for reset bit to self-clear indicating reset is complete */
-       for (i = 0; i < 500; i++) {
-               msleep(1);
-               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-                                    IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
-               if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET))
-                       break;
-       }
-
-       if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
-               status = IXGBE_ERR_RESET_FAILED;
-               hw_dbg(hw, "PHY reset polling failed to complete.\n");
-       }
-
-out:
-       return status;
+       return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                                    IXGBE_MDIO_PHY_XS_DEV_TYPE,
+                                    IXGBE_MDIO_PHY_XS_RESET);
 }
 
 /**
@@ -437,7 +369,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
  **/
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
+       s32 status = IXGBE_NOT_IMPLEMENTED;
        u32 time_out;
        u32 max_time_out = 10;
        u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
@@ -478,6 +410,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
                autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
                if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+                       status = 0;
                        break;
                }
        }
@@ -512,9 +445,6 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
        if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
-       if (speed & IXGBE_LINK_SPEED_100_FULL)
-               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
-
        /* Setup link based on the new speed settings */
        hw->phy.ops.setup_link(hw);
 
@@ -522,40 +452,6 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 }
 
 /**
- *  ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
-{
-       s32 status = IXGBE_ERR_LINK_SETUP;
-       u16 speed_ability;
-
-       *speed = 0;
-       *autoneg = true;
-
-       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
-                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-                                     &speed_ability);
-
-       if (status == 0) {
-               if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
-                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
-                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
-                       *speed |= IXGBE_LINK_SPEED_100_FULL;
-       }
-
-       return status;
-}
-
-/**
  *  ixgbe_check_phy_link_tnx - Determine link and speed status
  *  @hw: pointer to hardware structure
  *
@@ -620,24 +516,6 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
        return status;
 }
 
-
-/**
- *  ixgbe_get_phy_firmware_version_aq - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_aq(struct ixgbe_hw *hw,
-                                       u16 *firmware_version)
-{
-       s32 status = 0;
-
-       status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
-                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-                                     firmware_version);
-
-       return status;
-}
-
 /**
  *  ixgbe_reset_phy_nl - Performs a PHY reset
  *  @hw: pointer to hardware structure
@@ -745,101 +623,45 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u32 vendor_oui = 0;
-       enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
        u8 identifier = 0;
        u8 comp_codes_1g = 0;
        u8 comp_codes_10g = 0;
-       u8 oui_bytes[3] = {0, 0, 0};
-       u8 cable_tech = 0;
-       u16 enforce_sfp = 0;
-
-       if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
-               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
-               goto out;
-    }
+       u8 oui_bytes[4] = {0, 0, 0, 0};
+       u8 transmission_media = 0;
 
        status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
                                             &identifier);
 
-       if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
+       if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
                hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               if (hw->phy.type != ixgbe_phy_nl) {
-                       hw->phy.id = 0;
-                       hw->phy.type = ixgbe_phy_unknown;
-               }
                goto out;
        }
 
-       /* LAN ID is needed for sfp_type determination */
-       hw->mac.ops.set_lan_id(hw);
-
        if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
                hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
                                           &comp_codes_1g);
                hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
                                           &comp_codes_10g);
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
-                                           &cable_tech);
+               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
+                                          &transmission_media);
 
                 /* ID Module
                  * =========
                  * 0   SFP_DA_CU
                  * 1   SFP_SR
                  * 2   SFP_LR
-                 * 3   SFP_DA_CORE0 - 82599-specific
-                 * 4   SFP_DA_CORE1 - 82599-specific
-                 * 5   SFP_SR/LR_CORE0 - 82599-specific
-                 * 6   SFP_SR/LR_CORE1 - 82599-specific
                  */
-               if (hw->mac.type == ixgbe_mac_82598EB) {
-                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-                               hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                               hw->phy.sfp_type = ixgbe_sfp_type_sr;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                               hw->phy.sfp_type = ixgbe_sfp_type_lr;
-                       else
-                               hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-               } else if (hw->mac.type == ixgbe_mac_82599EB) {
-                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core1;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
-                       else
-                               hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-               }
-
-               if (hw->phy.sfp_type != stored_sfp_type)
-                       hw->phy.sfp_setup_needed = true;
+               if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+                       hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+               else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+                       hw->phy.sfp_type = ixgbe_sfp_type_sr;
+               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+                       hw->phy.sfp_type = ixgbe_sfp_type_lr;
+               else
+                       hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 
-               /* Determine if the SFP+ PHY is dual speed or not. */
-               hw->phy.multispeed_fiber = false;
-               if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
-                  (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
-                  ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
-                  (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
-                       hw->phy.multispeed_fiber = true;
                /* Determine PHY vendor */
-               if (hw->phy.type != ixgbe_phy_nl) {
+               if (hw->phy.type == ixgbe_phy_unknown) {
                        hw->phy.id = identifier;
                        hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE0,
@@ -858,7 +680,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
                        switch (vendor_oui) {
                        case IXGBE_SFF_VENDOR_OUI_TYCO:
-                               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                               if (transmission_media &
+                                   IXGBE_SFF_TWIN_AX_CAPABLE)
                                        hw->phy.type = ixgbe_phy_tw_tyco;
                                break;
                        case IXGBE_SFF_VENDOR_OUI_FTL:
@@ -867,50 +690,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        case IXGBE_SFF_VENDOR_OUI_AVAGO:
                                hw->phy.type = ixgbe_phy_sfp_avago;
                                break;
-                       case IXGBE_SFF_VENDOR_OUI_INTEL:
-                               hw->phy.type = ixgbe_phy_sfp_intel;
-                               break;
                        default:
-                               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                               if (transmission_media &
+                                   IXGBE_SFF_TWIN_AX_CAPABLE)
                                        hw->phy.type = ixgbe_phy_tw_unknown;
                                else
                                        hw->phy.type = ixgbe_phy_sfp_unknown;
                                break;
                        }
                }
-
-               /* All passive DA cables are supported */
-               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
-                       status = 0;
-                       goto out;
-               }
-
-               /* 1G SFP modules are not supported */
-               if (comp_codes_10g == 0) {
-                       hw->phy.type = ixgbe_phy_sfp_unsupported;
-                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                       goto out;
-               }
-
-               /* Anything else 82598-based is supported */
-               if (hw->mac.type == ixgbe_mac_82598EB) {
-                       status = 0;
-                       goto out;
-               }
-
-               ixgbe_get_device_caps(hw, &enforce_sfp);
-               if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
-                       /* Make sure we're a supported PHY type */
-                       if (hw->phy.type == ixgbe_phy_sfp_intel) {
-                               status = 0;
-                       } else {
-                               hw_dbg(hw, "SFP+ module not supported\n");
-                               hw->phy.type = ixgbe_phy_sfp_unsupported;
-                               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                       }
-               } else {
-                       status = 0;
-               }
+               status = 0;
        }
 
 out:
@@ -946,7 +735,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
        hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
 
        if ((!*list_offset) || (*list_offset == 0xFFFF))
-               return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+               return IXGBE_ERR_PHY;
 
        /* Shift offset to first ID word */
        (*list_offset)++;
@@ -982,532 +771,3 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
        return 0;
 }
 
-/**
- *  ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
- *  @eeprom_data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                  u8 *eeprom_data)
-{
-       return hw->phy.ops.read_i2c_byte(hw, byte_offset,
-                                        IXGBE_I2C_EEPROM_DEV_ADDR,
-                                        eeprom_data);
-}
-
-/**
- *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to write
- *  @eeprom_data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                   u8 eeprom_data)
-{
-       return hw->phy.ops.write_i2c_byte(hw, byte_offset,
-                                         IXGBE_I2C_EEPROM_DEV_ADDR,
-                                         eeprom_data);
-}
-
-/**
- *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to read
- *  @data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface at
- *  a specified deivce address.
- **/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 *data)
-{
-       s32 status = 0;
-       u32 max_retry = 10;
-       u32 retry = 0;
-       u16 swfw_mask = 0;
-       bool nack = 1;
-
-       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-               swfw_mask = IXGBE_GSSR_PHY1_SM;
-       else
-               swfw_mask = IXGBE_GSSR_PHY0_SM;
-
-
-       do {
-               if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
-                       status = IXGBE_ERR_SWFW_SYNC;
-                       goto read_byte_out;
-               }
-
-               ixgbe_i2c_start(hw);
-
-               /* Device Address and write indication */
-               status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_get_i2c_ack(hw);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_get_i2c_ack(hw);
-               if (status != 0)
-                       goto fail;
-
-               ixgbe_i2c_start(hw);
-
-               /* Device Address and read indication */
-               status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_get_i2c_ack(hw);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_clock_in_i2c_byte(hw, data);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_clock_out_i2c_bit(hw, nack);
-               if (status != 0)
-                       goto fail;
-
-               ixgbe_i2c_stop(hw);
-               break;
-
-fail:
-               ixgbe_release_swfw_sync(hw, swfw_mask);
-               msleep(100);
-               ixgbe_i2c_bus_clear(hw);
-               retry++;
-               if (retry < max_retry)
-                       hw_dbg(hw, "I2C byte read error - Retrying.\n");
-               else
-                       hw_dbg(hw, "I2C byte read error.\n");
-
-       } while (retry < max_retry);
-
-       ixgbe_release_swfw_sync(hw, swfw_mask);
-
-read_byte_out:
-       return status;
-}
-
-/**
- *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to write
- *  @data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface at
- *  a specified device address.
- **/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                 u8 dev_addr, u8 data)
-{
-       s32 status = 0;
-       u32 max_retry = 1;
-       u32 retry = 0;
-       u16 swfw_mask = 0;
-
-       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-               swfw_mask = IXGBE_GSSR_PHY1_SM;
-       else
-               swfw_mask = IXGBE_GSSR_PHY0_SM;
-
-       if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
-               status = IXGBE_ERR_SWFW_SYNC;
-               goto write_byte_out;
-       }
-
-       do {
-               ixgbe_i2c_start(hw);
-
-               status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_get_i2c_ack(hw);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_get_i2c_ack(hw);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_clock_out_i2c_byte(hw, data);
-               if (status != 0)
-                       goto fail;
-
-               status = ixgbe_get_i2c_ack(hw);
-               if (status != 0)
-                       goto fail;
-
-               ixgbe_i2c_stop(hw);
-               break;
-
-fail:
-               ixgbe_i2c_bus_clear(hw);
-               retry++;
-               if (retry < max_retry)
-                       hw_dbg(hw, "I2C byte write error - Retrying.\n");
-               else
-                       hw_dbg(hw, "I2C byte write error.\n");
-       } while (retry < max_retry);
-
-       ixgbe_release_swfw_sync(hw, swfw_mask);
-
-write_byte_out:
-       return status;
-}
-
-/**
- *  ixgbe_i2c_start - Sets I2C start condition
- *  @hw: pointer to hardware structure
- *
- *  Sets I2C start condition (High -> Low on SDA while SCL is High)
- **/
-static void ixgbe_i2c_start(struct ixgbe_hw *hw)
-{
-       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
-       /* Start condition must begin with data and clock high */
-       ixgbe_set_i2c_data(hw, &i2cctl, 1);
-       ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-       /* Setup time for start condition (4.7us) */
-       udelay(IXGBE_I2C_T_SU_STA);
-
-       ixgbe_set_i2c_data(hw, &i2cctl, 0);
-
-       /* Hold time for start condition (4us) */
-       udelay(IXGBE_I2C_T_HD_STA);
-
-       ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-       /* Minimum low period of clock is 4.7 us */
-       udelay(IXGBE_I2C_T_LOW);
-
-}
-
-/**
- *  ixgbe_i2c_stop - Sets I2C stop condition
- *  @hw: pointer to hardware structure
- *
- *  Sets I2C stop condition (Low -> High on SDA while SCL is High)
- **/
-static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
-{
-       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
-       /* Stop condition must begin with data low and clock high */
-       ixgbe_set_i2c_data(hw, &i2cctl, 0);
-       ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-       /* Setup time for stop condition (4us) */
-       udelay(IXGBE_I2C_T_SU_STO);
-
-       ixgbe_set_i2c_data(hw, &i2cctl, 1);
-
-       /* bus free time between stop and start (4.7us)*/
-       udelay(IXGBE_I2C_T_BUF);
-}
-
-/**
- *  ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
- *  @hw: pointer to hardware structure
- *  @data: data byte to clock in
- *
- *  Clocks in one byte data via I2C data/clock
- **/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
-{
-       s32 status = 0;
-       s32 i;
-       bool bit = 0;
-
-       for (i = 7; i >= 0; i--) {
-               status = ixgbe_clock_in_i2c_bit(hw, &bit);
-               *data |= bit<<i;
-
-               if (status != 0)
-                       break;
-       }
-
-       return status;
-}
-
-/**
- *  ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
- *  @hw: pointer to hardware structure
- *  @data: data byte clocked out
- *
- *  Clocks out one byte data via I2C data/clock
- **/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
-{
-       s32 status = 0;
-       s32 i;
-       u32 i2cctl;
-       bool bit = 0;
-
-       for (i = 7; i >= 0; i--) {
-               bit = (data >> i) & 0x1;
-               status = ixgbe_clock_out_i2c_bit(hw, bit);
-
-               if (status != 0)
-                       break;
-       }
-
-       /* Release SDA line (set high) */
-       i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-       i2cctl |= IXGBE_I2C_DATA_OUT;
-       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
-
-       return status;
-}
-
-/**
- *  ixgbe_get_i2c_ack - Polls for I2C ACK
- *  @hw: pointer to hardware structure
- *
- *  Clocks in/out one bit via I2C data/clock
- **/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
-{
-       s32 status;
-       u32 i = 0;
-       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-       u32 timeout = 10;
-       bool ack = 1;
-
-       status = ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-       if (status != 0)
-               goto out;
-
-       /* Minimum high period of clock is 4us */
-       udelay(IXGBE_I2C_T_HIGH);
-
-       /* Poll for ACK.  Note that ACK in I2C spec is
-        * transition from 1 to 0 */
-       for (i = 0; i < timeout; i++) {
-               i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-               ack = ixgbe_get_i2c_data(&i2cctl);
-
-               udelay(1);
-               if (ack == 0)
-                       break;
-       }
-
-       if (ack == 1) {
-               hw_dbg(hw, "I2C ack was not received.\n");
-               status = IXGBE_ERR_I2C;
-       }
-
-       ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-       /* Minimum low period of clock is 4.7 us */
-       udelay(IXGBE_I2C_T_LOW);
-
-out:
-       return status;
-}
-
-/**
- *  ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
- *  @hw: pointer to hardware structure
- *  @data: read data value
- *
- *  Clocks in one bit via I2C data/clock
- **/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
-{
-       s32 status;
-       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
-       status = ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-       /* Minimum high period of clock is 4us */
-       udelay(IXGBE_I2C_T_HIGH);
-
-       i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-       *data = ixgbe_get_i2c_data(&i2cctl);
-
-       ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-       /* Minimum low period of clock is 4.7 us */
-       udelay(IXGBE_I2C_T_LOW);
-
-       return status;
-}
-
-/**
- *  ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
- *  @hw: pointer to hardware structure
- *  @data: data value to write
- *
- *  Clocks out one bit via I2C data/clock
- **/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
-{
-       s32 status;
-       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
-       status = ixgbe_set_i2c_data(hw, &i2cctl, data);
-       if (status == 0) {
-               status = ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-               /* Minimum high period of clock is 4us */
-               udelay(IXGBE_I2C_T_HIGH);
-
-               ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-               /* Minimum low period of clock is 4.7 us.
-                * This also takes care of the data hold time.
-                */
-               udelay(IXGBE_I2C_T_LOW);
-       } else {
-               status = IXGBE_ERR_I2C;
-               hw_dbg(hw, "I2C data was not set to %X\n", data);
-       }
-
-       return status;
-}
-/**
- *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *
- *  Raises the I2C clock line '0'->'1'
- **/
-static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
-{
-       s32 status = 0;
-
-       *i2cctl |= IXGBE_I2C_CLK_OUT;
-
-       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
-
-       /* SCL rise time (1000ns) */
-       udelay(IXGBE_I2C_T_RISE);
-
-       return status;
-}
-
-/**
- *  ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *
- *  Lowers the I2C clock line '1'->'0'
- **/
-static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
-{
-
-       *i2cctl &= ~IXGBE_I2C_CLK_OUT;
-
-       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
-
-       /* SCL fall time (300ns) */
-       udelay(IXGBE_I2C_T_FALL);
-}
-
-/**
- *  ixgbe_set_i2c_data - Sets the I2C data bit
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *  @data: I2C data value (0 or 1) to set
- *
- *  Sets the I2C data bit
- **/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
-{
-       s32 status = 0;
-
-       if (data)
-               *i2cctl |= IXGBE_I2C_DATA_OUT;
-       else
-               *i2cctl &= ~IXGBE_I2C_DATA_OUT;
-
-       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
-
-       /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
-       udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
-
-       /* Verify data was set correctly */
-       *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-       if (data != ixgbe_get_i2c_data(i2cctl)) {
-               status = IXGBE_ERR_I2C;
-               hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
-       }
-
-       return status;
-}
-
-/**
- *  ixgbe_get_i2c_data - Reads the I2C SDA data bit
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *
- *  Returns the I2C data bit value
- **/
-static bool ixgbe_get_i2c_data(u32 *i2cctl)
-{
-       bool data;
-
-       if (*i2cctl & IXGBE_I2C_DATA_IN)
-               data = 1;
-       else
-               data = 0;
-
-       return data;
-}
-
-/**
- *  ixgbe_i2c_bus_clear - Clears the I2C bus
- *  @hw: pointer to hardware structure
- *
- *  Clears the I2C bus by sending nine clock pulses.
- *  Used when data line is stuck low.
- **/
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
-{
-       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-       u32 i;
-
-       ixgbe_i2c_start(hw);
-
-       ixgbe_set_i2c_data(hw, &i2cctl, 1);
-
-       for (i = 0; i < 9; i++) {
-               ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-               /* Min high period of clock is 4us */
-               udelay(IXGBE_I2C_T_HIGH);
-
-               ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-               /* Min low period of clock is 4.7us*/
-               udelay(IXGBE_I2C_T_LOW);
-       }
-
-       ixgbe_i2c_start(hw);
-
-       /* Put the i2c bus back to default state */
-       ixgbe_i2c_stop(hw);
-}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 773775e..2b985f8 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -39,12 +39,11 @@
 #define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27
 #define IXGBE_SFF_1GBE_COMP_CODES    0x6
 #define IXGBE_SFF_10GBE_COMP_CODES   0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY   0x8
+#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
 
 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
+#define IXGBE_SFF_TWIN_AX_CAPABLE            0x80
 #define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
 #define IXGBE_SFF_10GBASESR_CAPABLE          0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE          0x20
 #define IXGBE_I2C_EEPROM_READ_MASK           0x100
@@ -55,15 +54,14 @@
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
 
 /* Bit-shift macros */
-#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    24
-#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    16
-#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT    8
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    12
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    8
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT    4
 
 /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
 #define IXGBE_SFF_VENDOR_OUI_TYCO     0x00407600
 #define IXGBE_SFF_VENDOR_OUI_FTL      0x00906500
 #define IXGBE_SFF_VENDOR_OUI_AVAGO    0x00176A00
-#define IXGBE_SFF_VENDOR_OUI_INTEL    0x001B2100
 
 /* I2C SDA and SCL timing parameters for standard mode */
 #define IXGBE_I2C_T_HD_STA  4
@@ -93,9 +91,6 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
                                        ixgbe_link_speed speed,
                                        bool autoneg,
                                        bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,20 +98,10 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
                              bool *link_up);
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
                                        u16 *firmware_version);
-s32 ixgbe_get_phy_firmware_version_aq(struct ixgbe_hw *hw,
-                                       u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
                                         u16 *list_offset,
                                         u16 *data_offset);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                 u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                  u8 *eeprom_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                   u8 eeprom_data);
 #endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2cf6b71..9387965 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -30,13 +30,11 @@
 
 #include "ixgbe_osdep.h"
 
-
 /* Vendor ID */
 #define IXGBE_INTEL_VENDOR_ID   0x8086
 
 /* Device IDs */
 #define IXGBE_DEV_ID_82598               0x10B6
-#define IXGBE_DEV_ID_82598_BX            0x1508
 #define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6
 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
 #define IXGBE_DEV_ID_82598AT             0x10C8
@@ -46,9 +44,6 @@
 #define IXGBE_DEV_ID_82598_DA_DUAL_PORT  0x10F1
 #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
 #define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
-#define IXGBE_DEV_ID_82599_KX4     0x10F7
-#define IXGBE_DEV_ID_82599_SFP 0x10FB
-#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
@@ -56,12 +51,9 @@
 #define IXGBE_CTRL_EXT  0x00018
 #define IXGBE_ESDP      0x00020
 #define IXGBE_EODSDP    0x00028
-#define IXGBE_I2CCTL    0x00028
 #define IXGBE_LEDCTL    0x00200
 #define IXGBE_FRTIMER   0x00048
 #define IXGBE_TCPTIMER  0x0004C
-#define IXGBE_CORESPARE 0x00600
-#define IXGBE_EXVET     0x05078
 
 /* NVM Registers */
 #define IXGBE_EEC       0x10010
@@ -75,19 +67,6 @@
 #define IXGBE_FLOP      0x1013C
 #define IXGBE_GRC       0x10200
 
-/* General Receive Control */
-#define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
-
-#define IXGBE_VPDDIAG0  0x10204
-#define IXGBE_VPDDIAG1  0x10208
-
-/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN    0x00000001
-#define IXGBE_I2C_CLK_OUT   0x00000002
-#define IXGBE_I2C_DATA_IN   0x00000004
-#define IXGBE_I2C_DATA_OUT  0x00000008
-
 /* Interrupt Registers */
 #define IXGBE_EICR      0x00800
 #define IXGBE_EICS      0x00808
@@ -95,45 +74,21 @@
 #define IXGBE_EIMC      0x00888
 #define IXGBE_EIAC      0x00810
 #define IXGBE_EIAM      0x00890
-#define IXGBE_EICS_EX(_i)   (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i)   (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i)   (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i)   (0x00AD0 + (_i) * 4)
-/* 82599 EITR is only 12 bits, with the lower 3 always zero */
-/*
- * 82598 EITR is 16 bits but set the limits based on the max
- * supported by all ixgbe hardware
- */
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR     0x00000FF8
-#define IXGBE_MIN_EITR     8
 #define IXGBE_EITR(_i)  (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
-                         (0x012300 + (((_i) - 24) * 4)))
-#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
-#define IXGBE_EITR_LLI_MOD      0x00008000
-#define IXGBE_EITR_CNT_WDIS     0x80000000
+                         (0x012300 + ((_i) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FFF
 #define IXGBE_IVAR(_i)  (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
-#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
-#define IXGBE_EITRSEL   0x00894
 #define IXGBE_MSIXT     0x00000 /* MSI-X Table. 0x0000 - 0x01C */
 #define IXGBE_MSIXPBA   0x02000 /* MSI-X Pending bit array */
 #define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
 #define IXGBE_GPIE      0x00898
 
 /* Flow Control Registers */
-#define IXGBE_FCADBUL   0x03210
-#define IXGBE_FCADBUH   0x03214
-#define IXGBE_FCAMACL   0x04328
-#define IXGBE_FCAMACH   0x0432C
-#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_PFCTOP    0x03008
 #define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
 #define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
 #define IXGBE_FCRTV     0x032A0
-#define IXGBE_FCCFG     0x03D00
 #define IXGBE_TFCS      0x0CE00
 
 /* Receive DMA Registers */
@@ -149,12 +104,6 @@
                          (0x0D018 + ((_i - 64) * 0x40)))
 #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
                           (0x0D028 + ((_i - 64) * 0x40)))
-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
-                          (0x0D02C + ((_i - 64) * 0x40)))
-#define IXGBE_RSCDBU     0x03028
-#define IXGBE_RDDCC      0x02F20
-#define IXGBE_RXMEMWRAP  0x03190
-#define IXGBE_STARCTRL   0x03024
 /*
  * Split and Replication Receive Control Registers
  * 00-15 : 0x02100 + n*4
@@ -174,7 +123,6 @@
                                  (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
                                  (0x0D00C + ((_i - 64) * 0x40))))
 #define IXGBE_RDRXCTL           0x02F00
-#define IXGBE_RDRXCTL_RSC_PUSH  0x80
 #define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
                                              /* 8 of these 0x03C00 - 0x03C1C */
 #define IXGBE_RXCTRL    0x03000
@@ -192,8 +140,6 @@
                          (0x0A200 + ((_i) * 8)))
 #define IXGBE_RAH(_i)   (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                          (0x0A204 + ((_i) * 8)))
-#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
-#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
 /* Packet split receive type */
 #define IXGBE_PSRTYPE(_i)    (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
                               (0x0EA00 + ((_i) * 4)))
@@ -205,28 +151,6 @@
 #define IXGBE_VLNCTRL   0x05088
 #define IXGBE_MCSTCTRL  0x05090
 #define IXGBE_MRQC      0x05818
-#define IXGBE_SAQF(_i)  (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
-#define IXGBE_DAQF(_i)  (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
-#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
-#define IXGBE_FTQF(_i)  (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
-#define IXGBE_ETQF(_i)  (0x05128 + ((_i) * 4)) /* EType Queue Filter */
-#define IXGBE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
-#define IXGBE_SYNQF     0x0EC30 /* SYN Packet Queue Filter */
-#define IXGBE_RQTC      0x0EC70
-#define IXGBE_MTQC      0x08120
-#define IXGBE_VLVF(_i)  (0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
-#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
-#define IXGBE_VT_CTL    0x051B0
-#define IXGBE_VFRE(_i)  (0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i)  (0x08110 + ((_i) * 4))
-#define IXGBE_QDE       0x2F04
-#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i)   (0x0F400 + ((_i) * 4))
-#define IXGBE_VMRCTL(_i)        (0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i)       (0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i)         (0x0F630 + ((_i) * 4))
-#define IXGBE_L34T_IMIR(_i)      (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
-#define IXGBE_LLITHRESH 0x0EC90
 #define IXGBE_IMIR(_i)  (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
 #define IXGBE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
 #define IXGBE_IMIRVP    0x05AC0
@@ -234,33 +158,6 @@
 #define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
-/* Flow Director registers */
-#define IXGBE_FDIRCTRL  0x0EE00
-#define IXGBE_FDIRHKEY  0x0EE68
-#define IXGBE_FDIRSKEY  0x0EE6C
-#define IXGBE_FDIRDIP4M 0x0EE3C
-#define IXGBE_FDIRSIP4M 0x0EE40
-#define IXGBE_FDIRTCPM  0x0EE44
-#define IXGBE_FDIRUDPM  0x0EE48
-#define IXGBE_FDIRIP6M  0x0EE74
-#define IXGBE_FDIRM     0x0EE70
-
-/* Flow Director Stats registers */
-#define IXGBE_FDIRFREE  0x0EE38
-#define IXGBE_FDIRLEN   0x0EE4C
-#define IXGBE_FDIRUSTAT 0x0EE50
-#define IXGBE_FDIRFSTAT 0x0EE54
-#define IXGBE_FDIRMATCH 0x0EE58
-#define IXGBE_FDIRMISS  0x0EE5C
-
-/* Flow Director Programming registers */
-#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
-#define IXGBE_FDIRIPSA      0x0EE18
-#define IXGBE_FDIRIPDA      0x0EE1C
-#define IXGBE_FDIRPORT      0x0EE20
-#define IXGBE_FDIRVLAN      0x0EE24
-#define IXGBE_FDIRHASH      0x0EE28
-#define IXGBE_FDIRCMD       0x0EE2C
 
 /* Transmit DMA registers */
 #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
@@ -273,23 +170,7 @@
 #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
 #define IXGBE_DTXCTL    0x07E00
 
-#define IXGBE_DMATXCTL  0x04A80
-#define IXGBE_PFDTXGSWC     0x08220
-#define IXGBE_DTXMXSZRQ     0x08100
-#define IXGBE_DTXTCPFLGL    0x04A88
-#define IXGBE_DTXTCPFLGH    0x04A8C
-#define IXGBE_LBDRPEN       0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
-
-#define IXGBE_DMATXCTL_TE       0x1 /* Transmit Enable */
-#define IXGBE_DMATXCTL_NS       0x2 /* No Snoop LSO hdr buffer */
-#define IXGBE_DMATXCTL_GDV      0x8 /* Global Double VLAN */
-#define IXGBE_DMATXCTL_VT_SHIFT 16  /* VLAN EtherType */
-
-#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
 #define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
-/* Tx DCA Control register : 128 of these (0-127) */
-#define IXGBE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
 #define IXGBE_TIPG      0x0CB00
 #define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_MNGTXMAP  0x0CD10
@@ -381,181 +262,6 @@
 #define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
 
 
-/* Security Control Registers */
-#define IXGBE_SECTXCTRL         0x08800
-#define IXGBE_SECTXSTAT         0x08804
-#define IXGBE_SECTXBUFFAF       0x08808
-#define IXGBE_SECTXMINIFG       0x08810
-#define IXGBE_SECTXSTAT         0x08804
-#define IXGBE_SECRXCTRL         0x08D00
-#define IXGBE_SECRXSTAT         0x08D04
-
-/* Security Bit Fields and Masks */
-#define IXGBE_SECTXCTRL_SECTX_DIS       0x00000001
-#define IXGBE_SECTXCTRL_TX_DIS          0x00000002
-#define IXGBE_SECTXCTRL_STORE_FORWARD   0x00000004
-
-#define IXGBE_SECTXSTAT_SECTX_RDY       0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR       0x00000002
-
-#define IXGBE_SECRXCTRL_SECRX_DIS       0x00000001
-#define IXGBE_SECRXCTRL_RX_DIS          0x00000002
-
-#define IXGBE_SECRXSTAT_SECRX_RDY       0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR       0x00000002
-
-/* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCAP         0x08A00
-#define IXGBE_LSECRXCAP         0x08F00
-#define IXGBE_LSECTXCTRL        0x08A04
-#define IXGBE_LSECTXSCL         0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH         0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA          0x08A10
-#define IXGBE_LSECTXPN0         0x08A14
-#define IXGBE_LSECTXPN1         0x08A18
-#define IXGBE_LSECTXKEY0(_n)    (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n)    (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL        0x08F04
-#define IXGBE_LSECRXSCL         0x08F08
-#define IXGBE_LSECRXSCH         0x08F0C
-#define IXGBE_LSECRXSA(_i)      (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i)      (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-#define IXGBE_LSECTXUT          0x08A3C /* OutPktsUntagged */
-#define IXGBE_LSECTXPKTE        0x08A40 /* OutPktsEncrypted */
-#define IXGBE_LSECTXPKTP        0x08A44 /* OutPktsProtected */
-#define IXGBE_LSECTXOCTE        0x08A48 /* OutOctetsEncrypted */
-#define IXGBE_LSECTXOCTP        0x08A4C /* OutOctetsProtected */
-#define IXGBE_LSECRXUT          0x08F40 /* InPktsUntagged/InPktsNoTag */
-#define IXGBE_LSECRXOCTD        0x08F44 /* InOctetsDecrypted */
-#define IXGBE_LSECRXOCTV        0x08F48 /* InOctetsValidated */
-#define IXGBE_LSECRXBAD         0x08F4C /* InPktsBadTag */
-#define IXGBE_LSECRXNOSCI       0x08F50 /* InPktsNoSci */
-#define IXGBE_LSECRXUNSCI       0x08F54 /* InPktsUnknownSci */
-#define IXGBE_LSECRXUNCH        0x08F58 /* InPktsUnchecked */
-#define IXGBE_LSECRXDELAY       0x08F5C /* InPktsDelayed */
-#define IXGBE_LSECRXLATE        0x08F60 /* InPktsLate */
-#define IXGBE_LSECRXOK(_n)      (0x08F64 + (0x04 * (_n))) /* InPktsOk */
-#define IXGBE_LSECRXINV(_n)     (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
-#define IXGBE_LSECRXNV(_n)      (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
-#define IXGBE_LSECRXUNSA        0x08F7C /* InPktsUnusedSa */
-#define IXGBE_LSECRXNUSA        0x08F80 /* InPktsNotUsingSa */
-
-/* LinkSec (MacSec) Bit Fields and Masks */
-#define IXGBE_LSECTXCAP_SUM_MASK        0x00FF0000
-#define IXGBE_LSECTXCAP_SUM_SHIFT       16
-#define IXGBE_LSECRXCAP_SUM_MASK        0x00FF0000
-#define IXGBE_LSECRXCAP_SUM_SHIFT       16
-
-#define IXGBE_LSECTXCTRL_EN_MASK        0x00000003
-#define IXGBE_LSECTXCTRL_DISABLE        0x0
-#define IXGBE_LSECTXCTRL_AUTH           0x1
-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT   0x2
-#define IXGBE_LSECTXCTRL_AISCI          0x00000020
-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
-#define IXGBE_LSECTXCTRL_RSV_MASK       0x000000D8
-
-#define IXGBE_LSECRXCTRL_EN_MASK        0x0000000C
-#define IXGBE_LSECRXCTRL_EN_SHIFT       2
-#define IXGBE_LSECRXCTRL_DISABLE        0x0
-#define IXGBE_LSECRXCTRL_CHECK          0x1
-#define IXGBE_LSECRXCTRL_STRICT         0x2
-#define IXGBE_LSECRXCTRL_DROP           0x3
-#define IXGBE_LSECRXCTRL_PLSH           0x00000040
-#define IXGBE_LSECRXCTRL_RP             0x00000080
-#define IXGBE_LSECRXCTRL_RSV_MASK       0xFFFFFF33
-
-/* IpSec Registers */
-#define IXGBE_IPSTXIDX          0x08900
-#define IXGBE_IPSTXSALT         0x08904
-#define IXGBE_IPSTXKEY(_i)      (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX          0x08E00
-#define IXGBE_IPSRXIPADDR(_i)   (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI          0x08E14
-#define IXGBE_IPSRXIPIDX        0x08E18
-#define IXGBE_IPSRXKEY(_i)      (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT         0x08E2C
-#define IXGBE_IPSRXMOD          0x08E30
-
-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
-
-/* DCB registers */
-#define IXGBE_RTRPCS      0x02430
-#define IXGBE_RTTDCS      0x04900
-#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTPCS      0x0CD00
-#define IXGBE_RTRUP2TC    0x03020
-#define IXGBE_RTTUP2TC    0x0C800
-#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDQSEL    0x04904
-#define IXGBE_RTTDT1C     0x04908
-#define IXGBE_RTTDT1S     0x0490C
-#define IXGBE_RTTDTECC    0x04990
-#define IXGBE_RTTDTECC_NO_BCN   0x00000100
-
-#define IXGBE_RTTBCNRC    0x04984
-
-
-/* FCoE DMA Context Registers */
-#define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
-#define IXGBE_FCPTRH    0x02414 /* FC USer Desc. PTR High */
-#define IXGBE_FCBUFF    0x02418 /* FC Buffer Control */
-#define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
-#define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */
-#define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */
-#define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */
-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3
-#define IXGBE_FCBUFF_BUFFCNT_SHIFT   8
-#define IXGBE_FCBUFF_OFFSET_SHIFT    16
-#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */
-#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */
-#define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */
-#define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */
-#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
-/* FCoE SOF/EOF */
-#define IXGBE_TEOFF     0x04A94 /* Tx FC EOF */
-#define IXGBE_TSOFF     0x04A98 /* Tx FC SOF */
-#define IXGBE_REOFF     0x05158 /* Rx FC EOF */
-#define IXGBE_RSOFF     0x051F8 /* Rx FC SOF */
-/* FCoE Filter Context Registers */
-#define IXGBE_FCFLT     0x05108 /* FC FLT Context */
-#define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
-#define IXGBE_FCPARAM   0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */
-#define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */
-#define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */
-#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */
-/* FCoE Receive Control */
-#define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */
-#define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */
-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
-/* FCoE Redirection */
-#define IXGBE_FCRECTL   0x0ED00 /* FC Redirection Control */
-#define IXGBE_FCRETA0   0x0ED10 /* FC Redirection Table 0 */
-#define IXGBE_FCRETA(_i)        (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
-#define IXGBE_FCRECTL_ENA       0x1        /* FCoE Redir Table Enable */
-#define IXGBE_FCRETA_SIZE       8          /* Max entries in FCRETA */
-#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
 
 /* Stats registers */
 #define IXGBE_CRCERRS   0x04000
@@ -570,11 +276,6 @@
 #define IXGBE_LXONRXC   0x0CF60
 #define IXGBE_LXOFFTXC  0x03F68
 #define IXGBE_LXOFFRXC  0x0CF68
-#define IXGBE_LXONRXCNT 0x041A4
-#define IXGBE_LXOFFRXCNT 0x041A8
-#define IXGBE_PXONRXCNT(_i)     (0x04140 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXOFFRXCNT(_i)    (0x04160 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXON2OFFCNT(_i)   (0x03240 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_PXONTXC(_i)       (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
 #define IXGBE_PXONRXC(_i)       (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
 #define IXGBE_PXOFFTXC(_i)      (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
@@ -614,29 +315,15 @@
 #define IXGBE_MPTC      0x040F0
 #define IXGBE_BPTC      0x040F4
 #define IXGBE_XEC       0x04120
-#define IXGBE_SSVPC     0x08780
 
 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
 #define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
                          (0x08600 + ((_i) * 4)))
-#define IXGBE_TQSM(_i)  (0x08600 + ((_i) * 4))
 
 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_FCCRC     0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
-#define IXGBE_FCOERPDC  0x0241C /* FCoE Rx Packets Dropped Count */
-#define IXGBE_FCLAST    0x02424 /* FCoE Last Error Count */
-#define IXGBE_FCOEPRC   0x02428 /* Number of FCoE Packets Received */
-#define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */
-#define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */
-#define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */
-#define IXGBE_FCCRC_CNT_MASK    0x0000FFFF /* CRC_CNT: bit 0 - 15 */
-#define IXGBE_FCLAST_CNT_MASK   0x0000FFFF /* Last_CNT: bit 0 - 15 */
 
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -649,9 +336,6 @@
 #define IXGBE_MMAL(_i)  (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
 #define IXGBE_MMAH(_i)  (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
 #define IXGBE_FTFT      0x09400 /* 0x9400-0x97FC */
-#define IXGBE_METF(_i)  (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW     0x15014
 
 /* ARC Subsystem registers */
 #define IXGBE_HICR      0x15F00
@@ -684,65 +368,16 @@
 #define IXGBE_DCA_ID    0x11070
 #define IXGBE_DCA_CTRL  0x11074
 
-/* PCI-E registers 82599-Specific */
-#define IXGBE_GCR_EXT           0x11050
-#define IXGBE_GSCL_5_82599      0x11030
-#define IXGBE_GSCL_6_82599      0x11034
-#define IXGBE_GSCL_7_82599      0x11038
-#define IXGBE_GSCL_8_82599      0x1103C
-#define IXGBE_PHYADR_82599      0x11040
-#define IXGBE_PHYDAT_82599      0x11044
-#define IXGBE_PHYCTL_82599      0x11048
-#define IXGBE_PBACLR_82599      0x11068
-#define IXGBE_CIAA_82599        0x11088
-#define IXGBE_CIAD_82599        0x1108C
-#define IXGBE_PCIE_DIAG_0_82599 0x11090
-#define IXGBE_PCIE_DIAG_1_82599 0x11094
-#define IXGBE_PCIE_DIAG_2_82599 0x11098
-#define IXGBE_PCIE_DIAG_3_82599 0x1109C
-#define IXGBE_PCIE_DIAG_4_82599 0x110A0
-#define IXGBE_PCIE_DIAG_5_82599 0x110A4
-#define IXGBE_PCIE_DIAG_6_82599 0x110A8
-#define IXGBE_PCIE_DIAG_7_82599 0x110C0
-#define IXGBE_INTRPT_CSR_82599  0x110B0
-#define IXGBE_INTRPT_MASK_82599 0x110B8
-#define IXGBE_CDQ_MBR_82599     0x110B4
-#define IXGBE_MISC_REG_82599    0x110F0
-#define IXGBE_ECC_CTRL_0_82599  0x11100
-#define IXGBE_ECC_CTRL_1_82599  0x11104
-#define IXGBE_ECC_STATUS_82599  0x110E0
-#define IXGBE_BAR_CTRL_82599    0x110F4
-
-/* Time Sync Registers */
-#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
-#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
-#define IXGBE_RXSTMPL    0x051E8 /* Rx timestamp Low - RO */
-#define IXGBE_RXSTMPH    0x051A4 /* Rx timestamp High - RO */
-#define IXGBE_RXSATRL    0x051A0 /* Rx timestamp attribute low - RO */
-#define IXGBE_RXSATRH    0x051A8 /* Rx timestamp attribute high - RO */
-#define IXGBE_RXMTRL     0x05120 /* RX message type register low - RW */
-#define IXGBE_TXSTMPL    0x08C04 /* Tx timestamp value Low - RO */
-#define IXGBE_TXSTMPH    0x08C08 /* Tx timestamp value High - RO */
-#define IXGBE_SYSTIML    0x08C0C /* System time register Low - RO */
-#define IXGBE_SYSTIMH    0x08C10 /* System time register High - RO */
-#define IXGBE_TIMINCA    0x08C14 /* Increment attributes register - RW */
-#define IXGBE_RXUDP      0x08C1C /* Time Sync Rx UDP Port - RW */
-
 /* Diagnostic Registers */
 #define IXGBE_RDSTATCTL   0x02C20
 #define IXGBE_RDSTAT(_i)  (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
 #define IXGBE_RDHMPN      0x02F08
 #define IXGBE_RIC_DW(_i)  (0x02F10 + ((_i) * 4))
 #define IXGBE_RDPROBE     0x02F20
-#define IXGBE_RDMAM       0x02F30
-#define IXGBE_RDMAD       0x02F34
 #define IXGBE_TDSTATCTL   0x07C20
 #define IXGBE_TDSTAT(_i)  (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
 #define IXGBE_TDHMPN      0x07F08
-#define IXGBE_TDHMPN2     0x082FC
-#define IXGBE_TXDESCIC    0x082CC
 #define IXGBE_TIC_DW(_i)  (0x07F10 + ((_i) * 4))
-#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
 #define IXGBE_TDPROBE     0x07F20
 #define IXGBE_TXBUFCTRL   0x0C600
 #define IXGBE_TXBUFDATA0  0x0C610
@@ -770,10 +405,6 @@
 #define IXGBE_TXDATARDPTR(_i)   (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
 #define IXGBE_TXDESCRDPTR(_i)   (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
 #define IXGBE_PCIEECCCTL 0x1106C
-#define IXGBE_PCIEECCCTL0 0x11100
-#define IXGBE_PCIEECCCTL1 0x11104
-#define IXGBE_RXDBUECC  0x03F70
-#define IXGBE_TXDBUECC  0x0CF70
 #define IXGBE_PBTXECC   0x0C300
 #define IXGBE_PBRXECC   0x03300
 #define IXGBE_GHECCR    0x110B0
@@ -799,74 +430,24 @@
 #define IXGBE_MSRWD     0x04260
 #define IXGBE_MLADD     0x04264
 #define IXGBE_MHADD     0x04268
-#define IXGBE_MAXFRS    0x04268
 #define IXGBE_TREG      0x0426C
 #define IXGBE_PCSS1     0x04288
 #define IXGBE_PCSS2     0x0428C
 #define IXGBE_XPCSS     0x04290
-#define IXGBE_MFLCN     0x04294
 #define IXGBE_SERDESC   0x04298
 #define IXGBE_MACS      0x0429C
 #define IXGBE_AUTOC     0x042A0
 #define IXGBE_LINKS     0x042A4
-#define IXGBE_LINKS2    0x04324
 #define IXGBE_AUTOC2    0x042A8
 #define IXGBE_AUTOC3    0x042AC
 #define IXGBE_ANLP1     0x042B0
 #define IXGBE_ANLP2     0x042B4
 #define IXGBE_ATLASCTL  0x04800
-#define IXGBE_MMNGC     0x042D0
-#define IXGBE_ANLPNP1   0x042D4
-#define IXGBE_ANLPNP2   0x042D8
-#define IXGBE_KRPCSFC   0x042E0
-#define IXGBE_KRPCSS    0x042E4
-#define IXGBE_FECS1     0x042E8
-#define IXGBE_FECS2     0x042EC
-#define IXGBE_SMADARCTL 0x14F10
-#define IXGBE_MPVC      0x04318
-#define IXGBE_SGMIIC    0x04314
-
-/* Omer CORECTL */
-#define IXGBE_CORECTL           0x014F00
-/* BARCTRL */
-#define IXGBE_BARCTRL           0x110F4
-#define IXGBE_BARCTRL_FLSIZE    0x0700
-#define IXGBE_BARCTRL_CSRSIZE   0x2000
-
-/* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN          0x01
-#define IXGBE_RSCCTL_MAXDESC_1      0x00
-#define IXGBE_RSCCTL_MAXDESC_4      0x04
-#define IXGBE_RSCCTL_MAXDESC_8      0x08
-#define IXGBE_RSCCTL_MAXDESC_16     0x0C
-
-/* RSCDBU Bit Masks */
-#define IXGBE_RSCDBU_RSCSMALDIS_MASK    0x0000007F
-#define IXGBE_RSCDBU_RSCACKDIS          0x00000080
 
 /* RDRXCTL Bit Masks */
 #define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */
-#define IXGBE_RDRXCTL_CRCSTRIP      0x00000002 /* CRC Strip */
 #define IXGBE_RDRXCTL_MVMEN         0x00000020
 #define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
-#define IXGBE_RDRXCTL_AGGDIS        0x00010000 /* Aggregation disable */
-#define IXGBE_RDRXCTL_RSCFRSTSIZE   0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS     0x00800000 /* Disable RSC compl on LLI */
-
-/* RQTC Bit Masks and Shifts */
-#define IXGBE_RQTC_SHIFT_TC(_i)     ((_i) * 4)
-#define IXGBE_RQTC_TC0_MASK         (0x7 << 0)
-#define IXGBE_RQTC_TC1_MASK         (0x7 << 4)
-#define IXGBE_RQTC_TC2_MASK         (0x7 << 8)
-#define IXGBE_RQTC_TC3_MASK         (0x7 << 12)
-#define IXGBE_RQTC_TC4_MASK         (0x7 << 16)
-#define IXGBE_RQTC_TC5_MASK         (0x7 << 20)
-#define IXGBE_RQTC_TC6_MASK         (0x7 << 24)
-#define IXGBE_RQTC_TC7_MASK         (0x7 << 28)
-
-/* PSRTYPE.RQPL Bit masks and shift */
-#define IXGBE_PSRTYPE_RQPL_MASK     0x7
-#define IXGBE_PSRTYPE_RQPL_SHIFT    29
 
 /* CTRL Bit Masks */
 #define IXGBE_CTRL_GIO_DIS      0x00000004 /* Global IO Master Disable bit */
@@ -894,18 +475,11 @@
 #define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
 
 #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599  0xFF000000 /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
 #define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
 #define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
 #define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
 
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
 #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
 #define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
@@ -949,8 +523,6 @@
 #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL    0xF0
 #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL    0xF0
 
-/* Omer bit masks */
-#define IXGBE_CORECTL_WRITE_CMD         0x00010000
 
 /* Device Type definitions for new protocol MDIO commands */
 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE               0x1
@@ -978,11 +550,6 @@
 #define IXGBE_MDIO_PHY_SPEED_ABILITY   0x4 /* Speed Ability Reg */
 #define IXGBE_MDIO_PHY_SPEED_10G       0x0001 /* 10G capable */
 #define IXGBE_MDIO_PHY_SPEED_1G        0x0010 /* 1G capable */
-#define IXGBE_MDIO_PHY_SPEED_100M      0x0020 /* 100M capable */
-#define IXGBE_MDIO_PHY_EXT_ABILITY        0xB /* Ext Ability Reg */
-#define IXGBE_MDIO_PHY_10GBASET_ABILITY   0x0004 /* 10GBaseT capable */
-#define IXGBE_MDIO_PHY_1000BASET_ABILITY  0x0020 /* 1000BaseT capable */
-#define IXGBE_MDIO_PHY_100BASETX_ABILITY  0x0080 /* 100BaseTX capable */
 
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Addr Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
@@ -1002,8 +569,6 @@
 /* PHY IDs*/
 #define TN1010_PHY_ID    0x00A19410
 #define TNX_FW_REV       0xB
-#define AQ1002_PHY_ID    0x03A1B420
-#define AQ_FW_REV        0x20
 #define QT2022_PHY_ID    0x0043A400
 #define ATH_PHY_ID       0x03429050
 
@@ -1025,17 +590,11 @@
 /* General purpose Interrupt Enable */
 #define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */
 #define IXGBE_SDP1_GPIEN         0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN         0x00000004 /* SDP2 */
 #define IXGBE_GPIE_MSIX_MODE     0x00000010 /* MSI-X mode */
 #define IXGBE_GPIE_OCD           0x00000020 /* Other Clear Disable */
 #define IXGBE_GPIE_EIMEN         0x00000040 /* Immediate Interrupt Enable */
 #define IXGBE_GPIE_EIAME         0x40000000
 #define IXGBE_GPIE_PBA_SUPPORT   0x80000000
-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_GPIE_VTMODE_MASK   0x0000C000 /* VT Mode Mask */
-#define IXGBE_GPIE_VTMODE_16     0x00004000 /* 16 VFs 8 queues per VF */
-#define IXGBE_GPIE_VTMODE_32     0x00008000 /* 32 VFs 4 queues per VF */
-#define IXGBE_GPIE_VTMODE_64     0x0000C000 /* 64 VFs 2 queues per VF */
 
 /* Transmit Flow Control status */
 #define IXGBE_TFCS_TXOFF         0x00000001
@@ -1076,25 +635,6 @@
 #define IXGBE_VMD_CTL_VMDQ_EN     0x00000001
 #define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
 
-/* VT_CTL bitmasks */
-#define IXGBE_VT_CTL_DIS_DEFPL  0x20000000 /* disable default pool */
-#define IXGBE_VT_CTL_REPLEN     0x40000000 /* replication enabled */
-#define IXGBE_VT_CTL_VT_ENABLE  0x00000001  /* Enable VT Mode */
-#define IXGBE_VT_CTL_POOL_SHIFT 7
-#define IXGBE_VT_CTL_POOL_MASK  (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
-
-/* VMOLR bitmasks */
-#define IXGBE_VMOLR_AUPE        0x01000000 /* accept untagged packets */
-#define IXGBE_VMOLR_ROMPE       0x02000000 /* accept packets in MTA tbl */
-#define IXGBE_VMOLR_ROPE        0x04000000 /* accept packets in UC tbl */
-#define IXGBE_VMOLR_BAM         0x08000000 /* accept broadcast packets */
-#define IXGBE_VMOLR_MPE         0x10000000 /* multicast promiscuous */
-
-/* VFRE bitmask */
-#define IXGBE_VFRE_ENABLE_ALL   0xFFFFFFFF
-
-#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to clear RSTI */
-
 /* RDHMPN and TDHMPN bitmasks */
 #define IXGBE_RDHMPN_RDICADDR       0x007FF800
 #define IXGBE_RDHMPN_RDICRDREQ      0x00800000
@@ -1103,41 +643,6 @@
 #define IXGBE_TDHMPN_TDICRDREQ      0x00800000
 #define IXGBE_TDHMPN_TDICADDR_SHIFT 11
 
-#define IXGBE_RDMAM_MEM_SEL_SHIFT   13
-#define IXGBE_RDMAM_DWORD_SHIFT     9
-#define IXGBE_RDMAM_DESC_COMP_FIFO  1
-#define IXGBE_RDMAM_DFC_CMD_FIFO    2
-#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
-#define IXGBE_RDMAM_TCN_STATUS_RAM  4
-#define IXGBE_RDMAM_WB_COLL_FIFO    5
-#define IXGBE_RDMAM_QSC_CNT_RAM     6
-#define IXGBE_RDMAM_QSC_FCOE_RAM    7
-#define IXGBE_RDMAM_QSC_QUEUE_CNT   8
-#define IXGBE_RDMAM_QSC_QUEUE_RAM   0xA
-#define IXGBE_RDMAM_QSC_RSC_RAM     0xB
-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE     135
-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT     4
-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE      48
-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT      7
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE   32
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT   4
-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE    256
-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT    9
-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE      8
-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT      4
-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE       64
-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT       4
-#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE      512
-#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT      5
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE     32
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT     4
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE     128
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT     8
-#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE       32
-#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT       8
-
-#define IXGBE_TXDESCIC_READY        0x80000000
-
 /* Receive Checksum Control */
 #define IXGBE_RXCSUM_IPPCSE     0x00001000   /* IP payload checksum enable */
 #define IXGBE_RXCSUM_PCSD       0x00002000   /* packet checksum disabled */
@@ -1158,25 +663,15 @@
 #define IXGBE_RMCS_TFCE_PRIORITY       0x00000010 /* Tx Priority FC ena */
 #define IXGBE_RMCS_ARBDIS       0x00000040 /* Arbitration disable bit */
 
-/* FCCFG Bit Masks */
-#define IXGBE_FCCFG_TFCE_802_3X         0x00000008 /* Tx link FC enable */
-#define IXGBE_FCCFG_TFCE_PRIORITY       0x00000010 /* Tx priority FC enable */
 
 /* Interrupt register bitmasks */
 
 /* Extended Interrupt Cause Read */
 #define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_FLOW_DIR     0x00010000 /* FDir Exception */
-#define IXGBE_EICR_RX_MISS      0x00020000 /* Packet Buffer Overrun */
-#define IXGBE_EICR_PCI          0x00040000 /* PCI Exception */
-#define IXGBE_EICR_MAILBOX      0x00080000 /* VF to PF Mailbox Interrupt */
 #define IXGBE_EICR_LSC          0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC      0x00200000 /* PN Threshold */
 #define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
 #define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
 #define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2     0x04000000 /* Gen Purpose Interrupt on SDP2 */
-#define IXGBE_EICR_ECC          0x10000000 /* ECC Error */
 #define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */
 #define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */
 #define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
@@ -1184,16 +679,10 @@
 
 /* Extended Interrupt Cause Set */
 #define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EICS_RX_MISS      IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */
-#define IXGBE_EICS_PCI          IXGBE_EICR_PCI       /* PCI Exception */
-#define IXGBE_EICS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
 #define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EICS_ECC          IXGBE_EICR_ECC       /* ECC Error */
 #define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
 #define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1201,16 +690,10 @@
 
 /* Extended Interrupt Mask Set */
 #define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EIMS_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */
-#define IXGBE_EIMS_PCI          IXGBE_EICR_PCI       /* PCI Exception */
-#define IXGBE_EIMS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
 #define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMS_ECC          IXGBE_EICR_ECC       /* ECC Error */
 #define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */
 #define IXGBE_EIMS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1218,16 +701,10 @@
 
 /* Extended Interrupt Mask Clear */
 #define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EIMC_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */
-#define IXGBE_EIMC_PCI          IXGBE_EICR_PCI       /* PCI Exception */
-#define IXGBE_EIMC_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
 #define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMC_ECC          IXGBE_EICR_ECC       /* ECC Error */
 #define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Err */
 #define IXGBE_EIMC_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1250,45 +727,12 @@
 #define IXGBE_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
 #define IXGBE_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
 #define IXGBE_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of control bits */
-#define IXGBE_IMIR_SIZE_BP_82599  0x00001000 /* Packet size bypass */
-#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIR_CTRL_BP_82599  0x00080000 /* Bypass check of control bits */
-#define IXGBE_IMIR_LLI_EN_82599   0x00100000 /* Enables low latency Int */
-#define IXGBE_IMIR_RX_QUEUE_MASK_82599  0x0000007F /* Rx Queue Mask */
-#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
-#define IXGBE_IMIRVP_PRIORITY_MASK      0x00000007 /* VLAN priority mask */
-#define IXGBE_IMIRVP_PRIORITY_EN        0x00000008 /* VLAN priority enable */
-
-#define IXGBE_MAX_FTQF_FILTERS          128
-#define IXGBE_FTQF_PROTOCOL_MASK        0x00000003
-#define IXGBE_FTQF_PROTOCOL_TCP         0x00000000
-#define IXGBE_FTQF_PROTOCOL_UDP         0x00000001
-#define IXGBE_FTQF_PROTOCOL_SCTP        2
-#define IXGBE_FTQF_PRIORITY_MASK        0x00000007
-#define IXGBE_FTQF_PRIORITY_SHIFT       2
-#define IXGBE_FTQF_POOL_MASK            0x0000003F
-#define IXGBE_FTQF_POOL_SHIFT           8
-#define IXGBE_FTQF_5TUPLE_MASK_MASK     0x0000001F
-#define IXGBE_FTQF_5TUPLE_MASK_SHIFT    25
-#define IXGBE_FTQF_SOURCE_ADDR_MASK     0x1E
-#define IXGBE_FTQF_DEST_ADDR_MASK       0x1D
-#define IXGBE_FTQF_SOURCE_PORT_MASK     0x1B
-#define IXGBE_FTQF_DEST_PORT_MASK       0x17
-#define IXGBE_FTQF_PROTOCOL_COMP_MASK   0x0F
-#define IXGBE_FTQF_POOL_MASK_EN         0x40000000
-#define IXGBE_FTQF_QUEUE_ENABLE         0x80000000
 
 /* Interrupt clear mask */
 #define IXGBE_IRQ_CLEAR_MASK    0xFFFFFFFF
 
 /* Interrupt Vector Allocation Registers */
 #define IXGBE_IVAR_REG_NUM      25
-#define IXGBE_IVAR_REG_NUM_82599           64
 #define IXGBE_IVAR_TXRX_ENTRY   96
 #define IXGBE_IVAR_RX_ENTRY     64
 #define IXGBE_IVAR_RX_QUEUE(_i)    (0 + (_i))
@@ -1302,32 +746,6 @@
 
 #define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */
 
-/* ETYPE Queue Filter/Select Bit Masks */
-#define IXGBE_MAX_ETQF_FILTERS  8
-#define IXGBE_ETQF_FCOE         0x08000000 /* bit 27 */
-#define IXGBE_ETQF_BCN          0x10000000 /* bit 28 */
-#define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
-#define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */
-
-#define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */
-#define IXGBE_ETQS_RX_QUEUE_SHIFT       16
-#define IXGBE_ETQS_LLI          0x20000000 /* bit 29 */
-#define IXGBE_ETQS_QUEUE_EN     0x80000000 /* bit 31 */
-
-/*
- * ETQF filter list: one static filter per filter consumer. This is
- *                   to avoid filter collisions later. Add new filters
- *                   here!!
- *
- * Current filters:
- *    EAPOL 802.1x (0x888e): Filter 0
- *    FCoE (0x8906):         Filter 2
- *    1588 (0x88f7):         Filter 3
- */
-#define IXGBE_ETQF_FILTER_EAPOL          0
-#define IXGBE_ETQF_FILTER_FCOE           2
-#define IXGBE_ETQF_FILTER_1588           3
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
 #define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
@@ -1335,9 +753,6 @@
 #define IXGBE_VLNCTRL_VFE       0x40000000  /* bit 30 */
 #define IXGBE_VLNCTRL_VME       0x80000000  /* bit 31 */
 
-/* VLAN pool filtering masks */
-#define IXGBE_VLVF_VIEN         0x80000000  /* filter is valid */
-#define IXGBE_VLVF_ENTRIES      64
 
 #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */
 
@@ -1350,10 +765,7 @@
 #define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */
 
 /* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
-#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
-#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
-#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000001
 #define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
 #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
 #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
@@ -1390,17 +802,9 @@
 #define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
 #define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
 #define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
-#define IXGBE_AUTOC_FECA        0x00040000
-#define IXGBE_AUTOC_FECR        0x00020000
-#define IXGBE_AUTOC_KR_SUPP     0x00010000
 #define IXGBE_AUTOC_AN_RESTART  0x00001000
 #define IXGBE_AUTOC_FLU         0x00000001
 #define IXGBE_AUTOC_LMS_SHIFT   13
-#define IXGBE_AUTOC_LMS_10G_SERIAL      (0x3 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR       (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_SGMII_1G_100M   (0x5 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
 #define IXGBE_AUTOC_LMS_MASK            (0x7 << IXGBE_AUTOC_LMS_SHIFT)
 #define IXGBE_AUTOC_LMS_1G_LINK_NO_AN   (0x0 << IXGBE_AUTOC_LMS_SHIFT)
 #define IXGBE_AUTOC_LMS_10G_LINK_NO_AN  (0x1 << IXGBE_AUTOC_LMS_SHIFT)
@@ -1418,15 +822,6 @@
 #define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC2_UPPER_MASK  0xFFFF0000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK  0x00030000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR  (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
 
 /* LINKS Bit Masks */
 #define IXGBE_LINKS_KX_AN_COMP  0x80000000
@@ -1436,7 +831,6 @@
 #define IXGBE_LINKS_RX_MODE     0x06000000
 #define IXGBE_LINKS_TX_MODE     0x01800000
 #define IXGBE_LINKS_XGXS_EN     0x00400000
-#define IXGBE_LINKS_SGMII_EN    0x02000000
 #define IXGBE_LINKS_PCS_1G_EN   0x00200000
 #define IXGBE_LINKS_1G_AN_EN    0x00100000
 #define IXGBE_LINKS_KX_AN_IDLE  0x00080000
@@ -1446,13 +840,11 @@
 #define IXGBE_LINKS_TL_FAULT    0x00001000
 #define IXGBE_LINKS_SIGNAL      0x00000F00
 
-#define IXGBE_LINKS_SPEED_82599     0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599  0x20000000
-#define IXGBE_LINKS_SPEED_100_82599 0x10000000
 #define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
 
+#define FIBER_LINK_UP_LIMIT     50
+
 /* PCS1GLSTA Bit Masks */
 #define IXGBE_PCS1GLSTA_LINK_OK         1
 #define IXGBE_PCS1GLSTA_SYNK_OK         0x10
@@ -1524,14 +916,6 @@
 #define IXGBE_FW_PTR            0x0F
 #define IXGBE_PBANUM0_PTR       0x15
 #define IXGBE_PBANUM1_PTR       0x16
-#define IXGBE_SAN_MAC_ADDR_PTR  0x28
-#define IXGBE_DEVICE_CAPS       0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
-#define IXGBE_PCIE_MSIX_82599_CAPS  0x72
-#define IXGBE_PCIE_MSIX_82598_CAPS  0x62
-
-/* MSI-X capability fields masks */
-#define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
 
 /* Legacy EEPROM word offsets */
 #define IXGBE_ISCSI_BOOT_CAPS           0x0033
@@ -1570,18 +954,6 @@
 #define IXGBE_EERD_ATTEMPTS 100000
 #endif
 
-#define IXGBE_PCIE_CTRL2                 0x5   /* PCIe Control 2 Offset */
-#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE    0x8   /* Dummy Function Enable */
-#define IXGBE_PCIE_CTRL2_LAN_DISABLE     0x2   /* LAN PCI Disable */
-#define IXGBE_PCIE_CTRL2_DISABLE_SELECT  0x1   /* LAN Disable Select */
-
-#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET  0x0
-#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
-#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
-#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
-#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
-#define IXGBE_FW_PATCH_VERSION_4   0x7
-
 /* PCI Bus Info */
 #define IXGBE_PCI_LINK_STATUS     0xB2
 #define IXGBE_PCI_LINK_WIDTH      0x3F0
@@ -1646,7 +1018,6 @@
 #define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
 #define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
-#define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
 
 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
 #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -1657,23 +1028,9 @@
 /* Receive Priority Flow Control Enable */
 #define IXGBE_FCTRL_RPFCE 0x00004000
 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
-#define IXGBE_MFLCN_PMCF        0x00000001 /* Pass MAC Control Frames */
-#define IXGBE_MFLCN_DPF         0x00000002 /* Discard Pause Frame */
-#define IXGBE_MFLCN_RPFCE       0x00000004 /* Receive Priority FC Enable */
-#define IXGBE_MFLCN_RFCE        0x00000008 /* Receive FC Enable */
 
 /* Multiple Receive Queue Control */
 #define IXGBE_MRQC_RSSEN                 0x00000001  /* RSS Enable */
-#define IXGBE_MRQC_MRQE_MASK                    0xF /* Bits 3:0 */
-#define IXGBE_MRQC_RT8TCEN               0x00000002 /* 8 TC no RSS */
-#define IXGBE_MRQC_RT4TCEN               0x00000003 /* 4 TC no RSS */
-#define IXGBE_MRQC_RTRSS8TCEN            0x00000004 /* 8 TC w/ RSS */
-#define IXGBE_MRQC_RTRSS4TCEN            0x00000005 /* 4 TC w/ RSS */
-#define IXGBE_MRQC_VMDQEN                0x00000008 /* VMDq2 64 pools no RSS */
-#define IXGBE_MRQC_VMDQRSS32EN           0x0000000A /* VMDq2 32 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRSS64EN           0x0000000B /* VMDq2 64 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRT8TCEN           0x0000000C /* VMDq2/RT 16 pool 8 TC */
-#define IXGBE_MRQC_VMDQRT4TCEN           0x0000000D /* VMDq2/RT 32 pool 4 TC */
 #define IXGBE_MRQC_RSS_FIELD_MASK        0xFFFF0000
 #define IXGBE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
 #define IXGBE_MRQC_RSS_FIELD_IPV4        0x00020000
@@ -1684,12 +1041,6 @@
 #define IXGBE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
 #define IXGBE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
-#define IXGBE_MRQC_L3L4TXSWEN            0x00008000
-
-/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE     0x00000001
-#define IXGBE_QDE_IDX_MASK   0x00007F00
-#define IXGBE_QDE_IDX_SHIFT           8
 
 #define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
 #define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
@@ -1701,26 +1052,10 @@
 #define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
 
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP                  0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED            0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK               0x18000000
-/* Multiple Transmit Queue Command Register */
-#define IXGBE_MTQC_RT_ENA       0x1 /* DCB Enable */
-#define IXGBE_MTQC_VT_ENA       0x2 /* VMDQ2 Enable */
-#define IXGBE_MTQC_64Q_1PB      0x0 /* 64 queues 1 pack buffer */
-#define IXGBE_MTQC_32VF         0x8 /* 4 TX Queues per pool w/32VF's */
-#define IXGBE_MTQC_64VF         0x4 /* 2 TX Queues per pool w/64VF's */
-#define IXGBE_MTQC_8TC_8TQ      0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
-
 /* Receive Descriptor bit definitions */
 #define IXGBE_RXD_STAT_DD       0x01    /* Descriptor Done */
 #define IXGBE_RXD_STAT_EOP      0x02    /* End of Packet */
-#define IXGBE_RXD_STAT_FLM      0x04    /* FDir Match */
 #define IXGBE_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK   0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT  0x00000004
 #define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
 #define IXGBE_RXD_STAT_L4CS     0x20    /* L4 xsum calculated */
 #define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
@@ -1729,10 +1064,6 @@
 #define IXGBE_RXD_STAT_VEXT     0x200   /* 1st VLAN found */
 #define IXGBE_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
 #define IXGBE_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_LLINT    0x800   /* Pkt caused Low Latency Interrupt */
-#define IXGBE_RXD_STAT_TS       0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP     0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB       0x40000 /* Loopback Status */
 #define IXGBE_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
 #define IXGBE_RXD_ERR_CE        0x01    /* CRC Error */
 #define IXGBE_RXD_ERR_LE        0x02    /* Length Error */
@@ -1741,13 +1072,6 @@
 #define IXGBE_RXD_ERR_USE       0x20    /* Undersize Error */
 #define IXGBE_RXD_ERR_TCPE      0x40    /* TCP/UDP Checksum Error */
 #define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK           0xfff00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT          20         /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */
-#define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */
-#define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */
-#define IXGBE_RXDADV_ERR_FDIR_DROP      0x00200000 /* FDIR Drop error */
-#define IXGBE_RXDADV_ERR_FDIR_COLL      0x00400000 /* FDIR Collision error */
 #define IXGBE_RXDADV_ERR_HBO    0x00800000 /*Header Buffer Overflow */
 #define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
 #define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
@@ -1762,30 +1086,9 @@
 #define IXGBE_RXD_CFI_MASK      0x1000  /* CFI is bit 12 */
 #define IXGBE_RXD_CFI_SHIFT     12
 
-#define IXGBE_RXDADV_STAT_DD            IXGBE_RXD_STAT_DD  /* Done */
-#define IXGBE_RXDADV_STAT_EOP           IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK          0x000fffff /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
-
-/* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR    0x00000010
-#define IXGBE_PSRTYPE_UDPHDR    0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR   0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR   0x00000200
-#define IXGBE_PSRTYPE_L2HDR     0x00001000
 
 /* SRRCTL bit definitions */
 #define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
-#define IXGBE_SRRCTL_RDMTS_SHIFT        22
-#define IXGBE_SRRCTL_RDMTS_MASK         0x01C00000
-#define IXGBE_SRRCTL_DROP_EN            0x10000000
 #define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F
 #define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00
 #define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000
@@ -1800,10 +1103,7 @@
 
 #define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F
 #define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
 #define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK        0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT       17
 #define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5
 #define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000
 #define IXGBE_RXDADV_SPH                0x8000
@@ -1830,20 +1130,6 @@
 #define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 /* IPSec ESP */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 /* IPSec AH */
-#define IXGBE_RXDADV_PKTTYPE_LINKSEC    0x00004000 /* LinkSec Encap */
-#define IXGBE_RXDADV_PKTTYPE_ETQF       0x00008000 /* PKTTYPE is ETQF index */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 /* ETQF has 8 indices */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4          /* Right-shift 4 bits */
-
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP         0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
-
 /* Masks to determine if packets should be dropped due to frame errors */
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
                                       IXGBE_RXD_ERR_CE | \
@@ -1873,20 +1159,10 @@
 #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */
 #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
 
-/* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number)   (vf_number >> 4)
-#define IXGBE_MBVFICR(_i)                (0x00710 + (_i * 4))
-#define IXGBE_VFLRE(_i)                  (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i)                 (0x00700 + (_i * 4))
-
-/* Little Endian defines */
 #ifndef __le16
+/* Little Endian defines */
 #define __le16  u16
-#endif
-#ifndef __le32
 #define __le32  u32
-#endif
-#ifndef __le64
 #define __le64  u64
 
 #endif
@@ -1897,81 +1173,6 @@
 #define __be64  u64
 
 #endif
-enum ixgbe_fdir_pballoc_type {
-       IXGBE_FDIR_PBALLOC_64K = 0,
-       IXGBE_FDIR_PBALLOC_128K,
-       IXGBE_FDIR_PBALLOC_256K,
-};
-#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT           16
-
-/* Flow Director register values */
-#define IXGBE_FDIRCTRL_PBALLOC_64K              0x00000001
-#define IXGBE_FDIRCTRL_PBALLOC_128K             0x00000002
-#define IXGBE_FDIRCTRL_PBALLOC_256K             0x00000003
-#define IXGBE_FDIRCTRL_INIT_DONE                0x00000008
-#define IXGBE_FDIRCTRL_PERFECT_MATCH            0x00000010
-#define IXGBE_FDIRCTRL_REPORT_STATUS            0x00000020
-#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080
-#define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8
-#define IXGBE_FDIRCTRL_FLEX_SHIFT               16
-#define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000
-#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24
-#define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000
-#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT        28
-
-#define IXGBE_FDIRTCPM_DPORTM_SHIFT             16
-#define IXGBE_FDIRUDPM_DPORTM_SHIFT             16
-#define IXGBE_FDIRIP6M_DIPM_SHIFT               16
-#define IXGBE_FDIRM_VLANID                      0x00000001
-#define IXGBE_FDIRM_VLANP                       0x00000002
-#define IXGBE_FDIRM_POOL                        0x00000004
-#define IXGBE_FDIRM_L3P                         0x00000008
-#define IXGBE_FDIRM_L4P                         0x00000010
-#define IXGBE_FDIRM_FLEX                        0x00000020
-#define IXGBE_FDIRM_DIPv6                       0x00000040
-
-#define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
-#define IXGBE_FDIRFREE_FREE_SHIFT               0
-#define IXGBE_FDIRFREE_COLL_MASK                0x7FFF0000
-#define IXGBE_FDIRFREE_COLL_SHIFT               16
-#define IXGBE_FDIRLEN_MAXLEN_MASK               0x3F
-#define IXGBE_FDIRLEN_MAXLEN_SHIFT              0
-#define IXGBE_FDIRLEN_MAXHASH_MASK              0x7FFF0000
-#define IXGBE_FDIRLEN_MAXHASH_SHIFT             16
-#define IXGBE_FDIRUSTAT_ADD_MASK                0xFFFF
-#define IXGBE_FDIRUSTAT_ADD_SHIFT               0
-#define IXGBE_FDIRUSTAT_REMOVE_MASK             0xFFFF0000
-#define IXGBE_FDIRUSTAT_REMOVE_SHIFT            16
-#define IXGBE_FDIRFSTAT_FADD_MASK               0x00FF
-#define IXGBE_FDIRFSTAT_FADD_SHIFT              0
-#define IXGBE_FDIRFSTAT_FREMOVE_MASK            0xFF00
-#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT           8
-#define IXGBE_FDIRPORT_DESTINATION_SHIFT        16
-#define IXGBE_FDIRVLAN_FLEX_SHIFT               16
-#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT       15
-#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT       16
-
-#define IXGBE_FDIRCMD_CMD_MASK                  0x00000003
-#define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001
-#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH        0x00000007
-#define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008
-#define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010
-#define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020
-#define IXGBE_FDIRCMD_L4TYPE_TCP                0x00000040
-#define IXGBE_FDIRCMD_L4TYPE_SCTP               0x00000060
-#define IXGBE_FDIRCMD_IPV6                      0x00000080
-#define IXGBE_FDIRCMD_CLEARHT                   0x00000100
-#define IXGBE_FDIRCMD_DROP                      0x00000200
-#define IXGBE_FDIRCMD_INT                       0x00000400
-#define IXGBE_FDIRCMD_LAST                      0x00000800
-#define IXGBE_FDIRCMD_COLLISION                 0x00001000
-#define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
-#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
-#define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
-#define IXGBE_FDIR_INIT_DONE_POLL               10
-#define IXGBE_FDIRCMD_CMD_POLL                  10
 
 /* Transmit Descriptor - Legacy */
 struct ixgbe_legacy_tx_desc {
@@ -2059,9 +1260,6 @@ struct ixgbe_adv_tx_context_desc {
 
 /* Adv Transmit Descriptor Config Masks */
 #define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buf length(bytes) */
-#define IXGBE_ADVTXD_MAC_LINKSEC      0x00040000 /* Insert LinkSec */
-#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK   0x000003FF /* IPSec SA index */
-#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK    0x000001FF /* IPSec ESP length */
 #define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
 #define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
 #define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
@@ -2096,19 +1294,6 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */
 #define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
 #define IXGBE_ADVTXD_TUCMD_MKRREQ    0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC      0x00000400 /* IPSec offload request */
-#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
-#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
-#define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */
 #define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
 #define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
 
@@ -2122,17 +1307,13 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
                                         IXGBE_LINK_SPEED_10GB_FULL)
-#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
-                                        IXGBE_LINK_SPEED_1GB_FULL | \
-                                        IXGBE_LINK_SPEED_10GB_FULL)
-
 
 /* Physical layer type */
 typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
 #define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
 #define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004
+#define IXGBE_PHYSICAL_LAYER_100BASE_T    0x0004
 #define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
 #define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
 #define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
@@ -2141,47 +1322,7 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
 #define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
 #define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-
-/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
-
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET       0
-#define IXGBE_ATR_SRC_IPV6_OFFSET   2
-#define IXGBE_ATR_SRC_IPV4_OFFSET  14
-#define IXGBE_ATR_DST_IPV6_OFFSET  18
-#define IXGBE_ATR_DST_IPV4_OFFSET  30
-#define IXGBE_ATR_SRC_PORT_OFFSET  34
-#define IXGBE_ATR_DST_PORT_OFFSET  36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET   40
-#define IXGBE_ATR_L4TYPE_OFFSET    41
-
-#define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
-#define IXGBE_ATR_L4TYPE_UDP       0x1
-#define IXGBE_ATR_L4TYPE_TCP       0x2
-#define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_HASH_MASK     0x7fff
-
-/* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
-       /* Byte layout in order, all values with MSB first:
-        *
-        * vlan_id    - 2 bytes
-        * src_ip     - 16 bytes
-        * dst_ip     - 16 bytes
-        * src_port   - 2 bytes
-        * dst_port   - 2 bytes
-        * flex_bytes - 2 bytes
-        * vm_pool    - 1 byte
-        * l4type     - 1 byte
-        */
-       u8 byte_stream[42];
-};
+
 
 enum ixgbe_eeprom_type {
        ixgbe_eeprom_uninitialized = 0,
@@ -2192,16 +1333,12 @@ enum ixgbe_eeprom_type {
 enum ixgbe_mac_type {
        ixgbe_mac_unknown = 0,
        ixgbe_mac_82598EB,
-       ixgbe_mac_82599EB,
        ixgbe_num_macs
 };
 
 enum ixgbe_phy_type {
        ixgbe_phy_unknown = 0,
-       ixgbe_phy_none,
        ixgbe_phy_tn,
-       ixgbe_phy_aq,
-       ixgbe_phy_cu_unknown,
        ixgbe_phy_qt,
        ixgbe_phy_xaui,
        ixgbe_phy_nl,
@@ -2210,8 +1347,6 @@ enum ixgbe_phy_type {
        ixgbe_phy_sfp_avago,
        ixgbe_phy_sfp_ftl,
        ixgbe_phy_sfp_unknown,
-       ixgbe_phy_sfp_intel,
-       ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
        ixgbe_phy_generic
 };
 
@@ -2223,19 +1358,11 @@ enum ixgbe_phy_type {
  * 0   SFP_DA_CU
  * 1   SFP_SR
  * 2   SFP_LR
- * 3    SFP_DA_CU_CORE0 - 82599-specific
- * 4    SFP_DA_CU_CORE1 - 82599-specific
- * 5    SFP_SR/LR_CORE0 - 82599-specific
- * 6    SFP_SR/LR_CORE1 - 82599-specific
  */
 enum ixgbe_sfp_type {
        ixgbe_sfp_type_da_cu = 0,
        ixgbe_sfp_type_sr = 1,
        ixgbe_sfp_type_lr = 2,
-       ixgbe_sfp_type_da_cu_core0 = 3,
-       ixgbe_sfp_type_da_cu_core1 = 4,
-       ixgbe_sfp_type_srlr_core0 = 5,
-       ixgbe_sfp_type_srlr_core1 = 6,
        ixgbe_sfp_type_not_present = 0xFFFE,
        ixgbe_sfp_type_unknown = 0xFFFF
 };
@@ -2254,9 +1381,6 @@ enum ixgbe_fc_mode {
        ixgbe_fc_rx_pause,
        ixgbe_fc_tx_pause,
        ixgbe_fc_full,
-#ifdef CONFIG_DCB
-       ixgbe_fc_pfc,
-#endif
        ixgbe_fc_default
 };
 
@@ -2297,6 +1421,7 @@ enum ixgbe_bus_width {
 struct ixgbe_addr_filter_info {
        u32 num_mc_addrs;
        u32 rar_used_count;
+       u32 mc_addr_in_rar_count;
        u32 mta_in_use;
        u32 overflow_promisc;
        bool user_set_promisc;
@@ -2309,7 +1434,6 @@ struct ixgbe_bus_info {
        enum ixgbe_bus_type type;
 
        u16 func;
-       u16 lan_id;
 };
 
 /* Flow control parameters */
@@ -2319,8 +1443,6 @@ struct ixgbe_fc_info {
        u16 pause_time; /* Flow Control Pause timer */
        bool send_xon; /* Flow control send XON */
        bool strict_ieee; /* Strict IEEE mode */
-       bool disable_fc_autoneg; /* Do not autonegotiate FC */
-       bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
        enum ixgbe_fc_mode current_mode; /* FC mode in effect */
        enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
 };
@@ -2382,21 +1504,6 @@ struct ixgbe_hw_stats {
        u64 qptc[16];
        u64 qbrc[16];
        u64 qbtc[16];
-       u64 qprdc[16];
-       u64 pxon2offc[8];
-       u64 fdirustat_add;
-       u64 fdirustat_remove;
-       u64 fdirfstat_fadd;
-       u64 fdirfstat_fremove;
-       u64 fdirmatch;
-       u64 fdirmiss;
-       u64 fccrc;
-       u64 fclast;
-       u64 fcoerpdc;
-       u64 fcoeprc;
-       u64 fcoeptc;
-       u64 fcoedwrc;
-       u64 fcoedwtc;
 };
 
 /* forward declaration */
@@ -2423,18 +1530,11 @@ struct ixgbe_mac_operations {
        enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
        u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
        s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
-       s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
-       s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
-       s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
        s32 (*stop_adapter)(struct ixgbe_hw *);
        s32 (*get_bus_info)(struct ixgbe_hw *);
        void (*set_lan_id)(struct ixgbe_hw *);
        s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
        s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
-       s32 (*setup_sfp)(struct ixgbe_hw *);
-       s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
-       s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
-       void (*release_swfw_sync)(struct ixgbe_hw *, u16);
 
        /* Link */
        s32 (*setup_link)(struct ixgbe_hw *);
@@ -2453,7 +1553,6 @@ struct ixgbe_mac_operations {
        /* RAR, Multicast, VLAN */
        s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
        s32 (*clear_rar)(struct ixgbe_hw *, u32);
-       s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
        s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
@@ -2468,13 +1567,12 @@ struct ixgbe_mac_operations {
        s32 (*init_uta_tables)(struct ixgbe_hw *);
 
        /* Flow Control */
-       s32 (*fc_enable)(struct ixgbe_hw *, s32);
+       s32 (*setup_fc)(struct ixgbe_hw *, s32);
 };
 
 struct ixgbe_phy_operations {
        s32 (*identify)(struct ixgbe_hw *);
        s32 (*identify_sfp)(struct ixgbe_hw *);
-       s32 (*init)(struct ixgbe_hw *);
        s32 (*reset)(struct ixgbe_hw *);
        s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
        s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
@@ -2487,7 +1585,6 @@ struct ixgbe_phy_operations {
        s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
        s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
        s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
-       void (*i2c_bus_clear)(struct ixgbe_hw *);
 };
 
 struct ixgbe_eeprom_info {
@@ -2503,22 +1600,16 @@ struct ixgbe_mac_info {
        enum ixgbe_mac_type             type;
        u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
        u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-       u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
        s32                             mc_filter_type;
        u32                             mcft_size;
        u32                             vft_size;
        u32                             num_rar_entries;
-       u32                             rar_highwater;
        u32                             max_tx_queues;
        u32                             max_rx_queues;
-       u32                             max_msix_vectors;
-       bool                            msix_vectors_from_pcie;
        u32                             orig_autoc;
-       u32                             orig_autoc2;
        bool                            orig_link_settings_stored;
        bool                            autoneg;
        bool                            autoneg_succeeded;
-       bool                            autotry_restart;
 };
 
 struct ixgbe_phy_info {
@@ -2527,13 +1618,11 @@ struct ixgbe_phy_info {
        u32                             addr;
        u32                             id;
        enum ixgbe_sfp_type             sfp_type;
-       bool                            sfp_setup_needed;
        u32                             revision;
        enum ixgbe_media_type           media_type;
        bool                            reset_disable;
        ixgbe_autoneg_advertised        autoneg_advertised;
        bool                            autoneg_wait_to_complete;
-       bool                            multispeed_fiber;
 };
 
 struct ixgbe_hw {
@@ -2556,7 +1645,6 @@ struct ixgbe_hw {
 #define ixgbe_call_func(hw, func, params, error) \
                 (func != NULL) ? func params : error
 
-
 /* Error Codes */
 #define IXGBE_ERR_EEPROM                        -1
 #define IXGBE_ERR_EEPROM_CHECKSUM               -2
@@ -2578,10 +1666,6 @@ struct ixgbe_hw {
 #define IXGBE_ERR_I2C                           -18
 #define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
 #define IXGBE_ERR_SFP_NOT_PRESENT               -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR               -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED            -23
-#define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
 
diff --git a/drivers/net/ixgbe/kcompat.c b/drivers/net/ixgbe/kcompat.c
index b8dbbaa..1923dc4 100644
--- a/drivers/net/ixgbe/kcompat.c
+++ b/drivers/net/ixgbe/kcompat.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -25,30 +25,17 @@
 
 
*******************************************************************************/
 
+
+
+
+
+
+
+#ifdef DRIVER_IXGBE
 #include "ixgbe.h"
-#include "kcompat.h"
+#endif
 
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
-struct sk_buff *
-_kc_skb_pad(struct sk_buff *skb, int pad)
-{
-        struct sk_buff *nskb;
-        
-        /* If the skbuff is non linear tailroom is always zero.. */
-        if(skb_tailroom(skb) >= pad)
-        {
-                memset(skb->data+skb->len, 0, pad);
-                return skb;
-        }
-        
-        nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
-        kfree_skb(skb);
-        if(nskb)
-                memset(nskb->data+nskb->len, 0, pad);
-        return nskb;
-} 
-#endif /* < 2.4.21 */
+#include "kcompat.h"
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
@@ -294,7 +281,7 @@ struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
 int _kc_pci_save_state(struct pci_dev *pdev)
-{
+{ 
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct adapter_struct *adapter = netdev_priv(netdev);
        int size = PCI_CONFIG_SPACE_LEN, i;
@@ -308,7 +295,7 @@ int _kc_pci_save_state(struct pci_dev *pdev)
                size = PCIE_CONFIG_SPACE_LEN;
        }
        pci_config_space_ich8lan();
-#ifdef HAVE_PCI_ERS
+#ifdef HAVE_PCI_ERS 
        if (adapter->config_space == NULL)
 #else
        WARN_ON(adapter->config_space != NULL);
@@ -333,12 +320,12 @@ void _kc_pci_restore_state(struct pci_dev * pdev)
 
        if (adapter->config_space != NULL) {
                pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-               if (pcie_cap_offset &&
+               if (pcie_cap_offset && 
                    !pci_read_config_word(pdev,
                                          pcie_cap_offset + PCIE_LINK_STATUS,
                                          &pcie_link_status))
                        size = PCIE_CONFIG_SPACE_LEN;
-
+       
                pci_config_space_ich8lan();
                for (i = 0; i < (size / 4); i++)
                pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
@@ -373,6 +360,16 @@ void _kc_free_netdev(struct net_device *netdev)
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#ifdef DRIVER_IXGBE
+int ixgbe_sysfs_create(struct ixgbe_adapter *adapter)
+{
+       return 0;
+}
+
+void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter)
+{
+       return;
+}
 
 int ixgbe_dcb_netlink_register()
 {
@@ -383,38 +380,27 @@ int ixgbe_dcb_netlink_unregister()
 {
        return 0;
 }
+#endif /* DRIVER_IXGBE */
 #endif /* < 2.6.23 */
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
 #ifdef NAPI
-/* this function returns the true netdev of the napi struct */
-struct net_device * napi_to_netdev(struct napi_struct *napi)
-{
-       struct adapter_q_vector *q_vector = container_of(napi,
-                                                       struct adapter_q_vector,
-                                                       napi);
-       struct adapter_struct *adapter = q_vector->adapter;
-
-       return adapter->netdev;
-}
-
-int _kc_napi_schedule_prep(struct napi_struct *napi)
-{
-       return (netif_running(napi_to_netdev(napi)) &&
-               netif_rx_schedule_prep(napi_to_poll_dev(napi)));
-}
-
 int __kc_adapter_clean(struct net_device *netdev, int *budget)
 {
        int work_done;
        int work_to_do = min(*budget, netdev->quota);
+#ifdef DRIVER_IXGBE
        /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
        struct napi_struct *napi = netdev->priv;
+#else
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       struct napi_struct *napi = &adapter->rx_ring[0].napi;
+#endif
        work_done = napi->poll(napi, work_to_do);
        *budget -= work_done;
        netdev->quota -= work_done;
-       return (work_done >= work_to_do) ? 1 : 0;
+       return work_done ? 1 : 0;
 }
 #endif /* NAPI */
 #endif /* <= 2.6.24 */
@@ -453,121 +439,4 @@ void _kc_netif_tx_start_all_queues(struct net_device *netdev)
                        netif_start_subqueue(netdev, i);
 }
 #endif /* HAVE_TX_MQ */
-#endif /* < 2.6.27 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
-
-int
-_kc_pci_prepare_to_sleep(struct pci_dev *dev)
-{
-       pci_power_t target_state;
-       int error;
-
-       target_state = pci_choose_state(dev, PMSG_SUSPEND);
-
-       pci_enable_wake(dev, target_state, true);
-
-       error = pci_set_power_state(dev, target_state);
-
-       if (error)
-               pci_enable_wake(dev, target_state, false);
-
-       return error;
-}
-
-int
-_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
-{
-       int err;
-
-       err = pci_enable_wake(dev, PCI_D3cold, enable);
-       if (err)
-               goto out;
-
-       err = pci_enable_wake(dev, PCI_D3hot, enable);
-
-out:
-       return err;
-}
-#endif /* < 2.6.28 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
-void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
-{
-       struct pci_dev *parent = pdev->bus->self;
-       u16 link_state;
-       int pos;
-
-       if (!parent)
-               return;
-
-       pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
-       if (pos) {
-               pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
-               link_state &= ~state;
-               pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
-       }
-}
-#endif /* < 2.6.29 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
-#ifdef HAVE_NETDEV_SELECT_QUEUE
-#include <net/ip.h>
-u32 _kc_simple_tx_hashrnd;
-u32 _kc_simple_tx_hashrnd_initialized = 0;
-
-u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
-{
-       u32 addr1, addr2, ports;
-       u32 hash, ihl;
-       u8 ip_proto = 0;
-
-       if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
-               get_random_bytes(&_kc_simple_tx_hashrnd, 4);
-               _kc_simple_tx_hashrnd_initialized = 1;
-       }
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
-                       ip_proto = ip_hdr(skb)->protocol;
-               addr1 = ip_hdr(skb)->saddr;
-               addr2 = ip_hdr(skb)->daddr;
-               ihl = ip_hdr(skb)->ihl;
-               break;
-       case htons(ETH_P_IPV6):
-               ip_proto = ipv6_hdr(skb)->nexthdr;
-               addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
-               addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
-               ihl = (40 >> 2);
-               break;
-       default:
-               return 0;
-       }
-
-
-       switch (ip_proto) {
-       case IPPROTO_TCP:
-       case IPPROTO_UDP:
-       case IPPROTO_DCCP:
-       case IPPROTO_ESP:
-       case IPPROTO_AH:
-       case IPPROTO_SCTP:
-       case IPPROTO_UDPLITE:
-               ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
-               break;
-
-       default:
-               ports = 0;
-               break;
-       }
-
-       hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
-
-       return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
-}
-#endif /* HAVE_NETDEV_SELECT_QUEUE */
-#endif /* < 2.6.30 */
+#endif /* <= 2.6.27 */
diff --git a/drivers/net/ixgbe/kcompat_ethtool.c b/drivers/net/ixgbe/kcompat_ethtool.c
index 388fb21..786d42e 100644
--- a/drivers/net/ixgbe/kcompat_ethtool.c
+++ b/drivers/net/ixgbe/kcompat_ethtool.c
@@ -1,7 +1,7 @@
 
/*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -958,7 +958,7 @@ int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
        if (bmcr & BMCR_ANENABLE) {
                ecmd->advertising |= ADVERTISED_Autoneg;
                ecmd->autoneg = AUTONEG_ENABLE;
-
+               
                nego = mii_nway_result(advert & lpa);
                if (nego == LPA_100FULL || nego == LPA_100HALF)
                        ecmd->speed = SPEED_100;
@@ -999,9 +999,9 @@ int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
                return -EINVAL;
        if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
                return -EINVAL;
-
+                                 
        /* ignore supported, maxtxpkt, maxrxpkt */
-
+       
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                u32 bmcr, advert, tmp;
 
@@ -1026,7 +1026,7 @@ int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
                        mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
                        mii->advertising = tmp;
                }
-
+               
                /* turn on autonegotiation, and force a renegotiate */
                bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
                bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
diff --git a/drivers/xen/netchannel2/vmq.c b/drivers/xen/netchannel2/vmq.c
index e36962b..aecfbf7 100644
--- a/drivers/xen/netchannel2/vmq.c
+++ b/drivers/xen/netchannel2/vmq.c
@@ -637,6 +637,9 @@ int vmq_netif_rx(struct sk_buff *skb, int queue_id)
 
        memset(skb_co, 0, sizeof(*skb_co));
 
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+               skb->proto_data_valid = 1;
+
        skb_co->nr_fragments = skb_shinfo(skb)->nr_frags;
        skb_co->type = NC2_PACKET_TYPE_pre_posted;
        skb_co->policy = transmit_policy_vmq;
-- 
1.6.3.1

