Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:
====================
pull request: bluetooth-next 2015-10-28
Here are some more Bluetooth patches for 4.4 which have collected during
the past week. The most important ones are from Kuba Pawlak for fixing
locking issues with SCO sockets. There's also a fix from Alexander Aring
for 6lowpan, a memleak fix from Julia Lawall for the btmrvl driver and
some cleanup patches from Marcel.
Please let me know if there are any issues pulling. Thanks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index aac9357..f9b9ad7 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -154,8 +154,9 @@
!Finclude/net/cfg80211.h cfg80211_scan_request
!Finclude/net/cfg80211.h cfg80211_scan_done
!Finclude/net/cfg80211.h cfg80211_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss_width_frame
-!Finclude/net/cfg80211.h cfg80211_inform_bss_width
+!Finclude/net/cfg80211.h cfg80211_inform_bss
+!Finclude/net/cfg80211.h cfg80211_inform_bss_frame_data
+!Finclude/net/cfg80211.h cfg80211_inform_bss_data
!Finclude/net/cfg80211.h cfg80211_unlink_bss
!Finclude/net/cfg80211.h cfg80211_find_ie
!Finclude/net/cfg80211.h ieee80211_bss_get_ie
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index f55aa28..078060a 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -37,6 +37,14 @@
Optional properties:
- status: Should be "ok" or "disabled" for enabled/disabled. Default is "ok".
+- tx-delay: Delay value for RGMII bridge TX clock.
+  Valid values are between 0 and 7, which map to delays of
+  417, 717, 1020, 1321, 1611, 1913, 2215 and 2514 ps respectively.
+  Default value is 4, which corresponds to 1611 ps.
+- rx-delay: Delay value for RGMII bridge RX clock.
+  Valid values are between 0 and 7, which map to delays of
+  273, 589, 899, 1222, 1480, 1806, 2147 and 2464 ps respectively.
+  Default value is 2, which corresponds to 899 ps.
Example:
menetclk: menetclk {
@@ -72,5 +80,7 @@
/* Board-specific peripheral configurations */
&menet {
+ tx-delay = <4>;
+ rx-delay = <2>;
status = "ok";
};
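
For illustration, a MAC driver could fetch these delay properties with the
standard OF helpers and fall back to the documented defaults when they are
absent (a minimal sketch; the function name is hypothetical):

	#include <linux/of.h>

	static void xgene_get_rgmii_delays(struct device_node *np,
					   u32 *tx_delay, u32 *rx_delay)
	{
		/* of_property_read_u32() returns non-zero when absent */
		if (of_property_read_u32(np, "tx-delay", tx_delay))
			*tx_delay = 4;		/* 1611 ps */
		if (of_property_read_u32(np, "rx-delay", rx_delay))
			*rx_delay = 2;		/* 899 ps */
	}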
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 676ecf6..4efca56 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -46,6 +46,7 @@
Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
- mac-address : See ethernet.txt file in the same directory
+- phy-handle : See ethernet.txt file in the same directory
Note: "ti,hwmods" field is used to fetch the base address and irq
resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
index 9940aa0..9c23fdf 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
@@ -12,7 +12,7 @@
mdio@803c0000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "hisilicon,mdio","hisilicon,hns-mdio";
+ compatible = "hisilicon,hns-mdio","hisilicon,mdio";
reg = <0x0 0x803c0000 0x0 0x10000>;
ethernet-phy@0 {
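
The reordering follows the devicetree convention that compatible strings run
from most to least specific, so the OS binds the most specific matching
driver first. On the driver side, the match table lists both strings (a
sketch; the actual hns_mdio table may differ):

	static const struct of_device_id hns_mdio_match[] = {
		{ .compatible = "hisilicon,hns-mdio" },
		{ .compatible = "hisilicon,mdio" },
		{ /* sentinel */ }
	};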
diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
new file mode 100644
index 0000000..974edd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
@@ -0,0 +1,24 @@
+SMSC LAN87xx Ethernet PHY
+
+Some boards require special tuning values. Configure them
+through an Ethernet OF device node.
+
+Optional properties:
+
+- smsc,disable-energy-detect:
+ If set, do not enable energy detect mode for the SMSC phy.
+ default: enable energy detect mode
+
+Examples:
+SMSC PHY with disabled energy detect mode on an AM335x-based board:
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+ status = "okay";
+
+ ethernetphy0: ethernet-phy@0 {
+ reg = <0>;
+ smsc,disable-energy-detect;
+ };
+};
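
A PHY driver would typically consume the property in its probe path (a
minimal sketch; the smsc_phy_priv layout is illustrative):

	static int smsc_phy_probe(struct phy_device *phydev)
	{
		struct device_node *np = phydev->dev.of_node;
		struct smsc_phy_priv *priv = phydev->priv;

		/* energy detect stays enabled unless the DT opts out */
		priv->energy_enable =
			!of_property_read_bool(np, "smsc,disable-energy-detect");

		return 0;
	}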
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ebe94f2..85752c8 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -384,6 +384,14 @@
Defaults are calculated at boot time from amount of available
memory.
+tcp_min_rtt_wlen - INTEGER
+ The window length of the windowed min filter to track the minimum RTT.
+ A shorter window lets a flow more quickly pick up a new (higher)
+ minimum RTT when it is moved to a longer path (e.g., due to traffic
+ engineering). A longer window makes the filter more resistant to RTT
+ inflations such as transient congestion. The unit is seconds.
+ Default: 300
+
tcp_moderate_rcvbuf - BOOLEAN
If set, TCP performs receive buffer auto-tuning, attempting to
automatically size the buffer (no greater than tcp_rmem[2]) to
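
The windowed min filter described under tcp_min_rtt_wlen behaves like a
minimum that is discarded once it becomes older than the window (a
simplified sketch, not the kernel's actual implementation):

	struct min_rtt {
		u32 rtt_us;		/* current windowed minimum */
		unsigned long stamp;	/* jiffies when it was recorded */
	};

	static void min_rtt_update(struct min_rtt *m, u32 sample_us, int wlen)
	{
		/* adopt smaller samples; replace a stale minimum outright */
		if (sample_us <= m->rtt_us ||
		    time_after(jiffies, m->stamp + wlen * HZ)) {
			m->rtt_us = sample_us;
			m->stamp = jiffies;
		}
	}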
@@ -425,6 +433,15 @@
you should think about lowering this value, such sockets
may consume significant resources. Cf. tcp_max_orphans.
+tcp_recovery - INTEGER
+ This value is a bitmap to enable various experimental loss recovery
+ features.
+
+ RACK: 0x1 enables the RACK loss detection for fast detection of lost
+ retransmissions and tail drops.
+
+ Default: 0x1
+
tcp_reordering - INTEGER
Initial reordering level of packets in a TCP stream.
TCP stack can then dynamically adjust flow reordering level
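
Because tcp_recovery is a bitmap, consumers test individual feature bits;
roughly (the macro name is illustrative, assuming the sysctl is exported as
sysctl_tcp_recovery):

	#define TCP_RACK_LOSS_DETECTION	0x1	/* illustrative name for bit 0x1 */

	static inline bool tcp_rack_enabled(void)
	{
		return sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
	}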
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 0714fe5..9199413 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -278,8 +278,8 @@
For a given L2 VLAN domain, the switch device should flood multicast/broadcast
and unknown unicast packets to all ports in domain, if allowed by port's
current STP state. The switch driver, knowing which ports are within which
-vlan L2 domain, can program the switch device for flooding. The packet should
-also be sent to the port netdev for processing by the bridge driver. The
+vlan L2 domain, can program the switch device for flooding. The packet may
+be sent to the port netdev for processing by the bridge driver. The
bridge should not reflood the packet to the same ports the device flooded,
otherwise there will be duplicate packets on the wire.
@@ -298,6 +298,9 @@
of ports scale in the L2 domain as the device is much more efficient at
flooding packets than software.
+If supported by the device, flood control can be offloaded to it, preventing
+certain netdevs from flooding unicast traffic for which there is no FDB entry.
+
IGMP Snooping
^^^^^^^^^^^^^
diff --git a/MAINTAINERS b/MAINTAINERS
index fb8603e..b6d822d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8540,6 +8540,16 @@
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
+QLOGIC QL4xxx ETHERNET DRIVER
+M: Yuval Mintz <Yuval.Mintz@qlogic.com>
+M: Ariel Elior <Ariel.Elior@qlogic.com>
+M: everest-linux-l2@qlogic.com
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/qlogic/qed/
+F: include/linux/qed/
+F: drivers/net/ethernet/qlogic/qede/
+
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/
@@ -8891,6 +8901,13 @@
F: drivers/net/wireless/rtlwifi/
F: drivers/net/wireless/rtlwifi/rtl8192ce/
+RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
+M: Jes Sorensen <Jes.Sorensen@redhat.com>
+L: linux-wireless@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
+S: Maintained
+F: drivers/net/wireless/realtek/rtl8xxxu/
+
S3 SAVAGE FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev@vger.kernel.org
diff --git a/arch/arm/mach-gemini/board-nas4220b.c b/arch/arm/mach-gemini/board-nas4220b.c
index ca8a25b..18b1279 100644
--- a/arch/arm/mach-gemini/board-nas4220b.c
+++ b/arch/arm/mach-gemini/board-nas4220b.c
@@ -18,7 +18,6 @@
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
#include <linux/io.h>
#include <asm/setup.h>
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
index 418188c..14c56f3 100644
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ b/arch/arm/mach-gemini/board-wbd111.c
@@ -15,7 +15,6 @@
#include <linux/input.h>
#include <linux/skbuff.h>
#include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
index 266b265..6070282 100644
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ b/arch/arm/mach-gemini/board-wbd222.c
@@ -15,7 +15,6 @@
#include <linux/input.h>
#include <linux/skbuff.h>
#include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
index 3500586..606dd5a 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
@@ -13,14 +13,12 @@
reg = <0x0 0x803c0000 0x0 0x10000
0x0 0x80000000 0x0 0x10000>;
- soc0_phy4: ethernet-phy@4 {
+ soc0_phy0: ethernet-phy@0 {
reg = <0x0>;
- device_type = "ethernet-phy";
compatible = "ethernet-phy-ieee802.3-c22";
};
- soc0_phy5: ethernet-phy@5 {
+ soc0_phy1: ethernet-phy@1 {
reg = <0x1>;
- device_type = "ethernet-phy";
compatible = "ethernet-phy-ieee802.3-c22";
};
};
@@ -37,7 +35,7 @@
0x0 0xc7000000 0x0 0x60000
>;
- phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+ phy-handle = <0 0 0 0 &soc0_phy0 &soc0_phy1 0 0>;
interrupts = <
/* [14] ge fifo err 8 / xge 6**/
149 0x4 150 0x4 151 0x4 152 0x4
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 18accb0..c53a53f 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,7 +1247,7 @@
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
- struct sk_buff *skb;
+ struct sk_buff *skb, *nskb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
int i, hdr_space_needed;
@@ -1262,14 +1262,10 @@
return;
hdr_space_needed = l2headersize(l2, 0);
- if (hdr_space_needed > skb_headroom(skb)) {
- struct sk_buff *orig_skb = skb;
-
- skb = skb_realloc_headroom(skb, hdr_space_needed);
- if (!skb) {
- dev_kfree_skb(orig_skb);
- return;
- }
+ nskb = skb_realloc_headroom(skb, hdr_space_needed);
+ if (!nskb) {
+ skb_queue_head(&l2->i_queue, skb);
+ return;
}
spin_lock_irqsave(&l2->lock, flags);
if (test_bit(FLG_MOD128, &l2->flag))
@@ -1282,7 +1278,7 @@
p1);
dev_kfree_skb(l2->windowar[p1]);
}
- l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
+ l2->windowar[p1] = skb;
i = sethdraddr(&st->l2, header, CMD);
@@ -1295,8 +1291,8 @@
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
- memcpy(skb_push(skb, i), header, i);
- st->l2.l2l1(st, PH_PULL | INDICATION, skb);
+ memcpy(skb_push(nskb, i), header, i);
+ st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
FsmDelTimer(&st->l2.t203, 13);
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 949cabb..5eb380a 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -1476,7 +1476,7 @@
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
- struct sk_buff *skb, *nskb, *oskb;
+ struct sk_buff *skb, *nskb;
u_char header[MAX_L2HEADER_LEN];
u_int i, p1;
@@ -1486,11 +1486,26 @@
skb = skb_dequeue(&l2->i_queue);
if (!skb)
return;
-
- if (test_bit(FLG_MOD128, &l2->flag))
+ i = sethdraddr(l2, header, CMD);
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ header[i++] = l2->vs << 1;
+ header[i++] = l2->vr << 1;
+ } else
+ header[i++] = (l2->vr << 5) | (l2->vs << 1);
+ nskb = skb_realloc_headroom(skb, i);
+ if (!nskb) {
+ printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
+ mISDNDevName4ch(&l2->ch), i);
+ skb_queue_head(&l2->i_queue, skb);
+ return;
+ }
+ if (test_bit(FLG_MOD128, &l2->flag)) {
p1 = (l2->vs - l2->va) % 128;
- else
+ l2->vs = (l2->vs + 1) % 128;
+ } else {
p1 = (l2->vs - l2->va) % 8;
+ l2->vs = (l2->vs + 1) % 8;
+ }
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1]) {
printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
@@ -1498,36 +1513,7 @@
dev_kfree_skb(l2->windowar[p1]);
}
l2->windowar[p1] = skb;
- i = sethdraddr(l2, header, CMD);
- if (test_bit(FLG_MOD128, &l2->flag)) {
- header[i++] = l2->vs << 1;
- header[i++] = l2->vr << 1;
- l2->vs = (l2->vs + 1) % 128;
- } else {
- header[i++] = (l2->vr << 5) | (l2->vs << 1);
- l2->vs = (l2->vs + 1) % 8;
- }
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- p1 = skb_headroom(nskb);
- if (p1 >= i)
- memcpy(skb_push(nskb, i), header, i);
- else {
- printk(KERN_WARNING
- "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
- mISDNDevName4ch(&l2->ch), i, p1);
- oskb = nskb;
- nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
- if (!nskb) {
- dev_kfree_skb(oskb);
- printk(KERN_WARNING "%s: no skb mem in %s\n",
- mISDNDevName4ch(&l2->ch), __func__);
- return;
- }
- memcpy(skb_put(nskb, i), header, i);
- memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
- dev_kfree_skb(oskb);
- }
+ memcpy(skb_push(nskb, i), header, i);
l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index d7fdea1..20bfb9b 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -237,6 +237,8 @@
numsegs; /* number of segments */
};
+#define ARCNET_LED_NAME_SZ (IFNAMSIZ + 6)
+
struct arcnet_local {
uint8_t config, /* current value of CONFIG register */
timeout, /* Extended timeout for COM20020 */
@@ -260,6 +262,13 @@
/* On preemptive and SMP a lock is needed */
spinlock_t lock;
+ struct led_trigger *tx_led_trig;
+ char tx_led_trig_name[ARCNET_LED_NAME_SZ];
+ struct led_trigger *recon_led_trig;
+ char recon_led_trig_name[ARCNET_LED_NAME_SZ];
+
+ struct timer_list timer;
+
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
* which can be used for either sending or receiving. The new dynamic
@@ -309,6 +318,8 @@
int (*reset)(struct net_device *dev, int really_reset);
void (*open)(struct net_device *dev);
void (*close)(struct net_device *dev);
+	void (*datatrigger)(struct net_device *dev, int enable);
+	void (*recontrigger)(struct net_device *dev, int enable);
void (*copy_to_card)(struct net_device *dev, int bufnum,
int offset, void *buf, int count);
@@ -319,6 +330,16 @@
void __iomem *mem_start; /* pointer to ioremap'ed MMIO */
};
+enum arcnet_led_event {
+ ARCNET_LED_EVENT_RECON,
+ ARCNET_LED_EVENT_OPEN,
+ ARCNET_LED_EVENT_STOP,
+ ARCNET_LED_EVENT_TX,
+};
+
+void arcnet_led_event(struct net_device *netdev, enum arcnet_led_event event);
+void devm_arcnet_led_init(struct net_device *netdev, int index, int subid);
+
#if ARCNET_DEBUG_MAX & D_SKB
void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
#else
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index e41dd36..6ea963e 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -52,6 +52,8 @@
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/leds.h>
+
#include "arcdevice.h"
#include "com9026.h"
@@ -189,6 +191,71 @@
#endif
+/* Trigger an LED event in response to an ARCNET device event */
+void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+ unsigned long led_delay = 350;
+ unsigned long tx_delay = 50;
+
+ switch (event) {
+ case ARCNET_LED_EVENT_RECON:
+ led_trigger_blink_oneshot(lp->recon_led_trig,
+ &led_delay, &led_delay, 0);
+ break;
+ case ARCNET_LED_EVENT_OPEN:
+ led_trigger_event(lp->tx_led_trig, LED_OFF);
+ led_trigger_event(lp->recon_led_trig, LED_OFF);
+ break;
+ case ARCNET_LED_EVENT_STOP:
+ led_trigger_event(lp->tx_led_trig, LED_OFF);
+ led_trigger_event(lp->recon_led_trig, LED_OFF);
+ break;
+ case ARCNET_LED_EVENT_TX:
+ led_trigger_blink_oneshot(lp->tx_led_trig,
+ &tx_delay, &tx_delay, 0);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(arcnet_led_event);
+
+static void arcnet_led_release(struct device *gendev, void *res)
+{
+ struct arcnet_local *lp = netdev_priv(to_net_dev(gendev));
+
+ led_trigger_unregister_simple(lp->tx_led_trig);
+ led_trigger_unregister_simple(lp->recon_led_trig);
+}
+
+/* Register ARCNET LED triggers for an arcnet device
+ *
+ * This is normally called from a driver's probe function
+ */
+void devm_arcnet_led_init(struct net_device *netdev, int index, int subid)
+{
+ struct arcnet_local *lp = netdev_priv(netdev);
+ void *res;
+
+ res = devres_alloc(arcnet_led_release, 0, GFP_KERNEL);
+ if (!res) {
+ netdev_err(netdev, "cannot register LED triggers\n");
+ return;
+ }
+
+ snprintf(lp->tx_led_trig_name, sizeof(lp->tx_led_trig_name),
+ "arc%d-%d-tx", index, subid);
+ snprintf(lp->recon_led_trig_name, sizeof(lp->recon_led_trig_name),
+ "arc%d-%d-recon", index, subid);
+
+ led_trigger_register_simple(lp->tx_led_trig_name,
+ &lp->tx_led_trig);
+ led_trigger_register_simple(lp->recon_led_trig_name,
+ &lp->recon_led_trig);
+
+ devres_add(&netdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_arcnet_led_init);
+
/* Unregister a protocol driver from the arc_proto_map. Protocol drivers
* are responsible for registering themselves, but the unregister routine
* is pretty generic so we'll do it here.
@@ -314,6 +381,16 @@
dev->flags = IFF_BROADCAST;
}
+static void arcnet_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ if (!netif_carrier_ok(dev)) {
+ netif_carrier_on(dev);
+ netdev_info(dev, "link up\n");
+ }
+}
+
struct net_device *alloc_arcdev(const char *name)
{
struct net_device *dev;
@@ -325,6 +402,9 @@
struct arcnet_local *lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
+ init_timer(&lp->timer);
+ lp->timer.data = (unsigned long) dev;
+ lp->timer.function = arcnet_timer;
}
return dev;
@@ -423,8 +503,11 @@
lp->hw.intmask(dev, lp->intmask);
arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
+ netif_carrier_off(dev);
netif_start_queue(dev);
+ mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));
+ arcnet_led_event(dev, ARCNET_LED_EVENT_OPEN);
return 0;
out_module_put:
@@ -438,7 +521,11 @@
{
struct arcnet_local *lp = netdev_priv(dev);
+ arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
+ del_timer_sync(&lp->timer);
+
netif_stop_queue(dev);
+ netif_carrier_off(dev);
/* flush TX and disable RX */
lp->hw.intmask(dev, 0);
@@ -515,7 +602,7 @@
struct ArcProto *proto;
int txbuf;
unsigned long flags;
- int freeskb, retval;
+ int retval;
arc_printk(D_DURING, dev,
"transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
@@ -554,15 +641,13 @@
* the package later - forget about it now
*/
dev->stats.tx_bytes += skb->len;
- freeskb = 1;
+ dev_kfree_skb(skb);
} else {
/* do it the 'split' way */
lp->outgoing.proto = proto;
lp->outgoing.skb = skb;
lp->outgoing.pkt = pkt;
- freeskb = 0;
-
if (proto->continue_tx &&
proto->continue_tx(dev, txbuf)) {
arc_printk(D_NORMAL, dev,
@@ -574,7 +659,6 @@
lp->next_tx = txbuf;
} else {
retval = NETDEV_TX_BUSY;
- freeskb = 0;
}
arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
@@ -588,10 +672,9 @@
arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
__FILE__, __LINE__, __func__, lp->hw.status(dev));
- spin_unlock_irqrestore(&lp->lock, flags);
- if (freeskb)
- dev_kfree_skb(skb);
+ arcnet_led_event(dev, ARCNET_LED_EVENT_TX);
+ spin_unlock_irqrestore(&lp->lock, flags);
return retval; /* no need to try again */
}
EXPORT_SYMBOL(arcnet_send_packet);
@@ -843,6 +926,13 @@
arc_printk(D_RECON, dev, "Network reconfiguration detected (status=%Xh)\n",
status);
+ if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
+ netdev_info(dev, "link down\n");
+ }
+ mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));
+
+ arcnet_led_event(dev, ARCNET_LED_EVENT_RECON);
/* MYRECON bit is at bit 7 of diagstatus */
if (diagstatus & 0x80)
arc_printk(D_RECON, dev, "Put out that recon myself\n");
@@ -893,6 +983,7 @@
lp->num_recons = lp->network_down = 0;
arc_printk(D_DURING, dev, "not recon: clearing counters anyway.\n");
+ netif_carrier_on(dev);
}
if (didsomething)
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index a12bf83..239de38 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -41,6 +41,7 @@
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/leds.h>
#include "arcdevice.h"
#include "com20020.h"
@@ -62,12 +63,43 @@
module_param(clockm, int, 0);
MODULE_LICENSE("GPL");
+static void led_tx_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct com20020_dev *card;
+ struct com20020_priv *priv;
+ struct com20020_pci_card_info *ci;
+
+ card = container_of(led_cdev, struct com20020_dev, tx_led);
+
+ priv = card->pci_priv;
+ ci = priv->ci;
+
+ outb(!!value, priv->misc + ci->leds[card->index].green);
+}
+
+static void led_recon_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct com20020_dev *card;
+ struct com20020_priv *priv;
+ struct com20020_pci_card_info *ci;
+
+ card = container_of(led_cdev, struct com20020_dev, recon_led);
+
+ priv = card->pci_priv;
+ ci = priv->ci;
+
+ outb(!!value, priv->misc + ci->leds[card->index].red);
+}
+
static void com20020pci_remove(struct pci_dev *pdev);
static int com20020pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct com20020_pci_card_info *ci;
+ struct com20020_pci_channel_map *mm;
struct net_device *dev;
struct arcnet_local *lp;
struct com20020_priv *priv;
@@ -84,9 +116,22 @@
ci = (struct com20020_pci_card_info *)id->driver_data;
priv->ci = ci;
+ mm = &ci->misc_map;
INIT_LIST_HEAD(&priv->list_dev);
+ if (mm->size) {
+ ioaddr = pci_resource_start(pdev, mm->bar) + mm->offset;
+ r = devm_request_region(&pdev->dev, ioaddr, mm->size,
+ "com20020-pci");
+ if (!r) {
+ pr_err("IO region %xh-%xh already allocated.\n",
+ ioaddr, ioaddr + mm->size - 1);
+ return -EBUSY;
+ }
+ priv->misc = ioaddr;
+ }
+
for (i = 0; i < ci->devcount; i++) {
struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
struct com20020_dev *card;
@@ -96,6 +141,7 @@
ret = -ENOMEM;
goto out_port;
}
+ dev->dev_port = i;
dev->netdev_ops = &com20020_netdev_ops;
@@ -131,6 +177,13 @@
lp->timeout = timeout;
lp->hw.owner = THIS_MODULE;
+	/* Get the dev_id from the PLX rotary encoder */
+ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+ dev->dev_id = 0xc;
+ dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4;
+
+ snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+
if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
pr_err("IO address %Xh is empty!\n", ioaddr);
ret = -EIO;
@@ -148,14 +201,41 @@
card->index = i;
card->pci_priv = priv;
+ card->tx_led.brightness_set = led_tx_set;
+ card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-tx",
+ dev->dev_id, i);
+ card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:green:tx:%d-%d",
+ dev->dev_id, i);
+
+ card->tx_led.dev = &dev->dev;
+ card->recon_led.brightness_set = led_recon_set;
+ card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-recon",
+ dev->dev_id, i);
+ card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:red:recon:%d-%d",
+ dev->dev_id, i);
+ card->recon_led.dev = &dev->dev;
card->dev = dev;
+ ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+ if (ret)
+ goto out_port;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+ if (ret)
+ goto out_port;
+
dev_set_drvdata(&dev->dev, card);
ret = com20020_found(dev, IRQF_SHARED);
if (ret)
goto out_port;
+ devm_arcnet_led_init(dev, dev->dev_id, i);
+
list_add(&card->list, &priv->list_dev);
}
@@ -234,6 +314,18 @@
.size = 0x08,
},
},
+ .misc_map = {
+ .bar = 2,
+ .offset = 0x10,
+ .size = 0x04,
+ },
+ .leds = {
+ {
+ .green = 0x0,
+ .red = 0x1,
+ },
+ },
+ .rotary = 0x0,
.flags = ARC_CAN_10MBIT,
};
@@ -251,6 +343,21 @@
.size = 0x08,
}
},
+ .misc_map = {
+ .bar = 2,
+ .offset = 0x10,
+ .size = 0x04,
+ },
+ .leds = {
+ {
+ .green = 0x0,
+ .red = 0x1,
+ }, {
+ .green = 0x2,
+ .red = 0x3,
+ },
+ },
+ .rotary = 0x0,
.flags = ARC_CAN_10MBIT,
};
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index c82f323..13d9ad4 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -118,7 +118,7 @@
arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
}
- lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
+ lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
/* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
arcnet_outb(0x42, ioaddr, COM20020_REG_W_XREG);
@@ -131,11 +131,6 @@
}
arc_printk(D_INIT_REASONS, dev, "status after reset: %X\n", status);
- /* Enable TX */
- lp->config |= TXENcfg;
- arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
- arcnet_outb(arcnet_inb(ioaddr, 8), ioaddr, COM20020_REG_W_XREG);
-
arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
ioaddr, COM20020_REG_W_COMMAND);
status = arcnet_inb(ioaddr, COM20020_REG_R_STATUS);
@@ -169,9 +164,33 @@
return 0;
}
+static int com20020_netdev_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ lp->config |= TXENcfg;
+ arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+
+ return arcnet_open(dev);
+}
+
+static int com20020_netdev_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ arcnet_close(dev);
+
+ /* disable transmitter */
+ lp->config &= ~TXENcfg;
+ arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+ return 0;
+}
+
const struct net_device_ops com20020_netdev_ops = {
- .ndo_open = arcnet_open,
- .ndo_stop = arcnet_close,
+ .ndo_open = com20020_netdev_open,
+ .ndo_stop = com20020_netdev_close,
.ndo_start_xmit = arcnet_send_packet,
.ndo_tx_timeout = arcnet_timeout,
.ndo_set_mac_address = com20020_set_hwaddr,
@@ -215,7 +234,7 @@
arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
}
- lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
+ lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
/* Default 0x38 + register: Node ID */
arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -274,7 +293,7 @@
dev->name, arcnet_inb(ioaddr, COM20020_REG_R_STATUS));
arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
- lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
+ lp->config |= (lp->timeout << 3) | (lp->backplane << 2);
/* power-up defaults */
arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
diff --git a/drivers/net/arcnet/com20020.h b/drivers/net/arcnet/com20020.h
index 22a460f..0bcc5d0 100644
--- a/drivers/net/arcnet/com20020.h
+++ b/drivers/net/arcnet/com20020.h
@@ -26,6 +26,7 @@
*/
#ifndef __COM20020_H
#define __COM20020_H
+#include <linux/leds.h>
int com20020_check(struct net_device *dev);
int com20020_found(struct net_device *dev, int shared);
@@ -36,6 +37,11 @@
#define PLX_PCI_MAX_CARDS 2
+struct ledoffsets {
+ int green;
+ int red;
+};
+
struct com20020_pci_channel_map {
u32 bar;
u32 offset;
@@ -47,6 +53,10 @@
int devcount;
struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
+ struct com20020_pci_channel_map misc_map;
+
+ struct ledoffsets leds[PLX_PCI_MAX_CARDS];
+ int rotary;
unsigned int flags;
};
@@ -54,12 +64,16 @@
struct com20020_priv {
struct com20020_pci_card_info *ci;
struct list_head list_dev;
+ resource_size_t misc;
};
struct com20020_dev {
struct list_head list;
struct net_device *dev;
+ struct led_classdev tx_led;
+ struct led_classdev recon_led;
+
struct com20020_priv *pci_priv;
int index;
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9d56515..6f946fe 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -21,10 +21,13 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_net.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include <net/switchdev.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -264,6 +267,50 @@
}
}
+static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
+ int port)
+{
+ unsigned int off;
+
+ switch (port) {
+ case 7:
+ off = P7_IRQ_OFF;
+ break;
+ case 0:
+ /* Port 0 interrupts are located on the first bank */
+ intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
+ return;
+ default:
+ off = P_IRQ_OFF(port);
+ break;
+ }
+
+ intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
+}
+
+static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
+ int port)
+{
+ unsigned int off;
+
+ switch (port) {
+ case 7:
+ off = P7_IRQ_OFF;
+ break;
+ case 0:
+ /* Port 0 interrupts are located on the first bank */
+ intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
+ intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
+ return;
+ default:
+ off = P_IRQ_OFF(port);
+ break;
+ }
+
+ intrl2_1_mask_set(priv, P_IRQ_MASK(off));
+ intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
+}
+
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
@@ -280,7 +327,7 @@
core_writel(priv, 0, CORE_G_PCTL_PORT(port));
/* Re-enable the GPHY and re-apply workarounds */
- if (port == 0 && priv->hw_params.num_gphy == 1) {
+ if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
bcm_sf2_gphy_enable_set(ds, true);
if (phy) {
/* if phy_stop() has been called before, phy
@@ -297,9 +344,9 @@
}
}
- /* Enable port 7 interrupts to get notified */
- if (port == 7)
- intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
+ /* Enable MoCA port interrupts to get notified */
+ if (port == priv->moca_port)
+ bcm_sf2_port_intr_enable(priv, port);
/* Set this port, and only this one to be in the default VLAN,
* if member of a bridge, restore its membership prior to
@@ -329,12 +376,10 @@
if (priv->wol_ports_mask & (1 << port))
return;
- if (port == 7) {
- intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
- intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
- }
+ if (port == priv->moca_port)
+ bcm_sf2_port_intr_disable(priv, port);
- if (port == 0 && priv->hw_params.num_gphy == 1)
+ if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, false);
if (dsa_is_cpu_port(ds, port))
@@ -555,6 +600,236 @@
return 0;
}
+/* Address Resolution Logic routines */
+static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 10;
+ u32 reg;
+
+ do {
+ reg = core_readl(priv, CORE_ARLA_RWCTL);
+ if (!(reg & ARL_STRTDN))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+ u32 cmd;
+
+ if (op > ARL_RW)
+ return -EINVAL;
+
+ cmd = core_readl(priv, CORE_ARLA_RWCTL);
+ cmd &= ~IVL_SVL_SELECT;
+ cmd |= ARL_STRTDN;
+ if (op)
+ cmd |= ARL_RW;
+ else
+ cmd &= ~ARL_RW;
+ core_writel(priv, cmd, CORE_ARLA_RWCTL);
+
+ return bcm_sf2_arl_op_wait(priv);
+}
+
+static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
+ u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
+ bool is_valid)
+{
+ unsigned int i;
+ int ret;
+
+ ret = bcm_sf2_arl_op_wait(priv);
+ if (ret)
+ return ret;
+
+ /* Read the 4 bins */
+ for (i = 0; i < 4; i++) {
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
+ fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
+ bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+
+ if (ent->is_valid && is_valid) {
+ *idx = i;
+ return 0;
+ }
+
+ /* This is the MAC we just deleted */
+ if (!is_valid && (mac_vid & mac))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
+ const unsigned char *addr, u16 vid, bool is_valid)
+{
+ struct bcm_sf2_arl_entry ent;
+ u32 fwd_entry;
+ u64 mac, mac_vid = 0;
+ u8 idx = 0;
+ int ret;
+
+ /* Convert the array into a 64-bit MAC */
+ mac = bcm_sf2_mac_to_u64(addr);
+
+ /* Perform a read for the given MAC and VID */
+ core_writeq(priv, mac, CORE_ARLA_MAC);
+ core_writel(priv, vid, CORE_ARLA_VID);
+
+ /* Issue a read operation for this MAC */
+ ret = bcm_sf2_arl_rw_op(priv, 1);
+ if (ret)
+ return ret;
+
+ ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
+ /* If this is a read, just finish now */
+ if (op)
+ return ret;
+
+ /* We could not find a matching MAC, so reset to a new entry */
+ if (ret) {
+ fwd_entry = 0;
+ idx = 0;
+ }
+
+ memset(&ent, 0, sizeof(ent));
+ ent.port = port;
+ ent.is_valid = is_valid;
+ ent.vid = vid;
+ ent.is_static = true;
+ memcpy(ent.mac, addr, ETH_ALEN);
+ bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+ core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
+ core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));
+
+ ret = bcm_sf2_arl_rw_op(priv, 0);
+ if (ret)
+ return ret;
+
+ /* Re-read the entry to check */
+ return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
+}
+
+static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ /* We do not need to do anything specific here yet */
+ return 0;
+}
+
+static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+}
+
+static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
+}
+
+static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
+{
+ unsigned timeout = 1000;
+ u32 reg;
+
+ do {
+ reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
+ if (!(reg & ARLA_SRCH_STDN))
+ return 0;
+
+ if (reg & ARLA_SRCH_VLID)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
+ struct bcm_sf2_arl_entry *ent)
+{
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
+ fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
+ bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
+ const struct bcm_sf2_arl_entry *ent,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ if (!ent->is_valid)
+ return 0;
+
+ if (port != ent->port)
+ return 0;
+
+ ether_addr_copy(fdb->addr, ent->mac);
+ fdb->vid = ent->vid;
+ fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ return cb(&fdb->obj);
+}
+
+static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct net_device *dev = ds->ports[port];
+ struct bcm_sf2_arl_entry results[2];
+ unsigned int count = 0;
+ int ret;
+
+ /* Start search operation */
+ core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
+
+ do {
+ ret = bcm_sf2_arl_search_wait(priv);
+ if (ret)
+ return ret;
+
+ /* Read both entries, then return their values back */
+ bcm_sf2_arl_search_rd(priv, 0, &results[0]);
+ ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
+ if (ret)
+ return ret;
+
+ bcm_sf2_arl_search_rd(priv, 1, &results[1]);
+ ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (!results[0].is_valid && !results[1].is_valid)
+ break;
+
+ } while (count++ < CORE_ARLA_NUM_ENTRIES);
+
+ return 0;
+}
+
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
struct bcm_sf2_priv *priv = dev_id;
@@ -615,6 +890,42 @@
intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
+static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
+ struct device_node *dn)
+{
+ struct device_node *port;
+ const char *phy_mode_str;
+ int mode;
+ unsigned int port_num;
+ int ret;
+
+ priv->moca_port = -1;
+
+ for_each_available_child_of_node(dn, port) {
+ if (of_property_read_u32(port, "reg", &port_num))
+ continue;
+
+ /* Internal PHYs get assigned a specific 'phy-mode' property
+ * value: "internal" to help flag them before MDIO probing
+ * has completed, since they might be turned off at that
+ * time
+ */
+ mode = of_get_phy_mode(port);
+ if (mode < 0) {
+ ret = of_property_read_string(port, "phy-mode",
+ &phy_mode_str);
+ if (ret < 0)
+ continue;
+
+ if (!strcasecmp(phy_mode_str, "internal"))
+ priv->int_phy_mask |= 1 << port_num;
+ }
+
+ if (mode == PHY_INTERFACE_MODE_MOCA)
+ priv->moca_port = port_num;
+ }
+}
+
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -633,6 +944,7 @@
* level
*/
dn = ds->pd->of_node->parent;
+ bcm_sf2_identify_ports(priv, ds->pd->of_node);
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -913,7 +1225,7 @@
status->link = 0;
- /* Port 7 is special as we do not get link status from CORE_LNKSTS,
+ /* MoCA port is special as we do not get link status from CORE_LNKSTS,
* which means that we need to force the link at the port override
* level to get the data to flow. We do use what the interrupt handler
* did determine before.
@@ -921,7 +1233,7 @@
* For the other ports, we just force the link status, since this is
* a fixed PHY device.
*/
- if (port == 7) {
+ if (port == priv->moca_port) {
status->link = priv->port_sts[port].link;
/* For MoCA interfaces, also force a link down notification
* since some version of the user-space daemon (mocad) use
@@ -1076,6 +1388,10 @@
.port_join_bridge = bcm_sf2_sw_br_join,
.port_leave_bridge = bcm_sf2_sw_br_leave,
.port_stp_update = bcm_sf2_sw_br_set_stp_state,
+ .port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
+ .port_fdb_add = bcm_sf2_sw_fdb_add,
+ .port_fdb_del = bcm_sf2_sw_fdb_del,
+ .port_fdb_dump = bcm_sf2_sw_fdb_dump,
};
static int __init bcm_sf2_init(void)
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 789d7b7..6bba1c9 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -19,6 +19,8 @@
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
#include <net/dsa.h>
@@ -50,6 +52,60 @@
u32 vlan_ctl_mask;
};
+struct bcm_sf2_arl_entry {
+ u8 port;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 is_valid:1;
+ u8 is_age:1;
+ u8 is_static:1;
+};
+
+static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
+{
+ unsigned int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
+}
+
+static inline u64 bcm_sf2_mac_to_u64(const u8 *src)
+{
+ unsigned int i;
+ u64 dst = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
+
+ return dst;
+}
+
+static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent,
+ u64 mac_vid, u32 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & PORTID_MASK;
+ ent->is_valid = !!(fwd_entry & ARL_VALID);
+ ent->is_age = !!(fwd_entry & ARL_AGE);
+ ent->is_static = !!(fwd_entry & ARL_STATIC);
+ bcm_sf2_mac_from_u64(mac_vid, ent->mac);
+ ent->vid = mac_vid >> VID_SHIFT;
+}
+
+static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+ const struct bcm_sf2_arl_entry *ent)
+{
+ *mac_vid = bcm_sf2_mac_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT;
+ *fwd_entry = ent->port & PORTID_MASK;
+ if (ent->is_valid)
+ *fwd_entry |= ARL_VALID;
+ if (ent->is_static)
+ *fwd_entry |= ARL_STATIC;
+ if (ent->is_age)
+ *fwd_entry |= ARL_AGE;
+}
+
struct bcm_sf2_priv {
/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
void __iomem *core;
@@ -78,6 +134,12 @@
/* Mask of ports enabled for Wake-on-LAN */
u32 wol_ports_mask;
+
+ /* MoCA port location */
+ int moca_port;
+
+ /* Bitmask of ports having an integrated PHY */
+ unsigned int int_phy_mask;
};
struct bcm_sf2_hw_stats {
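
A quick worked example of the byte order used by the conversion helpers
above: the first MAC octet lands in the most significant byte of the u64.

	static const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	/* bcm_sf2_mac_to_u64(mac) == 0x001122334455ULL;
	 * bcm_sf2_mac_from_u64(0x001122334455ULL, out) restores the array.
	 */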
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index fa4e6e7..97780d4 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -231,6 +231,49 @@
#define CORE_BRCM_HDR_RX_DIS 0x0980
#define CORE_BRCM_HDR_TX_DIS 0x0988
+#define CORE_ARLA_NUM_ENTRIES 1024
+
+#define CORE_ARLA_RWCTL 0x1400
+#define ARL_RW (1 << 0)
+#define IVL_SVL_SELECT (1 << 6)
+#define ARL_STRTDN (1 << 7)
+
+#define CORE_ARLA_MAC 0x1408
+#define CORE_ARLA_VID 0x1420
+#define ARLA_VIDTAB_INDX_MASK 0x1fff
+
+#define CORE_ARLA_MACVID0 0x1440
+#define MAC_MASK 0xffffffffff
+#define VID_SHIFT 48
+#define VID_MASK 0xfff
+
+#define CORE_ARLA_FWD_ENTRY0 0x1460
+#define PORTID_MASK 0x1ff
+#define ARL_CON_SHIFT 9
+#define ARL_CON_MASK 0x3
+#define ARL_PRI_SHIFT 11
+#define ARL_PRI_MASK 0x7
+#define ARL_AGE (1 << 14)
+#define ARL_STATIC (1 << 15)
+#define ARL_VALID (1 << 16)
+
+#define CORE_ARLA_MACVID_ENTRY(x) (CORE_ARLA_MACVID0 + ((x) * 0x40))
+#define CORE_ARLA_FWD_ENTRY(x) (CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40))
+
+#define CORE_ARLA_SRCH_CTL 0x1540
+#define ARLA_SRCH_VLID (1 << 0)
+#define IVL_SVL_SELECT (1 << 6)
+#define ARLA_SRCH_STDN (1 << 7)
+
+#define CORE_ARLA_SRCH_ADR 0x1544
+#define ARLA_SRCH_ADR_VALID (1 << 15)
+
+#define CORE_ARLA_SRCH_RSLT_0_MACVID 0x1580
+#define CORE_ARLA_SRCH_RSLT_0 0x15a0
+
+#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
+#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
+
#define CORE_MEM_PSM_VDD_CTRL 0x2380
#define P_TXQ_PSM_VDD_SHIFT 2
#define P_TXQ_PSM_VDD_MASK 0x3
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index c29aebe..9093577 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -26,7 +26,7 @@
if (bus == NULL)
return -EINVAL;
- return mdiobus_read(bus, ds->pd->sw_addr + addr, reg);
+ return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
}
#define REG_READ(addr, reg) \
@@ -47,7 +47,7 @@
if (bus == NULL)
return -EINVAL;
- return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val);
+ return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
}
#define REG_WRITE(addr, reg, val) \
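
mdiobus_read_nested()/mdiobus_write_nested() take the bus lock with
mutex_lock_nested() so that lockdep tolerates a switch reached through its
master's MDIO bus; the pattern is essentially the open-coded helper removed
from mv88e6xxx.c below:

	static int mdiobus_read_nested_sketch(struct mii_bus *bus, int addr,
					      u32 regnum)
	{
		int ret;

		mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
		ret = bus->read(bus, addr, regnum);
		mutex_unlock(&bus->mdio_lock);

		return ret;
	}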
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index dfca352..2c8eb6f 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -122,7 +122,7 @@
.port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
};
MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 796fdcb..cbf4dd8 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -349,7 +349,7 @@
.port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
};
MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 4591240..b1b14f5 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -21,39 +20,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include <linux/seq_file.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "mv88e6xxx.h"
-/* MDIO bus access can be nested in the case of PHYs connected to the
- * internal MDIO bus of the switch, which is accessed via MDIO bus of
- * the Ethernet interface. Avoid lockdep false positives by using
- * mutex_lock_nested().
- */
-static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
-{
- int ret;
-
- mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
- ret = bus->read(bus, addr, regnum);
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
-static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
- u16 val)
-{
- int ret;
-
- mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
- ret = bus->write(bus, addr, regnum, val);
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
* use all 32 SMI bus addresses on its SMI bus, and all switch registers
* will be directly accessible on some {device address,register address}
@@ -68,7 +38,7 @@
int i;
for (i = 0; i < 16; i++) {
- ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
+ ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
if (ret < 0)
return ret;
@@ -84,7 +54,7 @@
int ret;
if (sw_addr == 0)
- return mv88e6xxx_mdiobus_read(bus, addr, reg);
+ return mdiobus_read_nested(bus, addr, reg);
/* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -92,8 +62,8 @@
return ret;
/* Transmit the read command. */
- ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
- SMI_CMD_OP_22_READ | (addr << 5) | reg);
+ ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_READ | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -103,7 +73,7 @@
return ret;
/* Read the data. */
- ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
+ ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
if (ret < 0)
return ret;
@@ -147,7 +117,7 @@
int ret;
if (sw_addr == 0)
- return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
+ return mdiobus_write_nested(bus, addr, reg, val);
/* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -155,13 +125,13 @@
return ret;
/* Transmit the data to write. */
- ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
+ ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
if (ret < 0)
return ret;
/* Transmit the write command. */
- ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
- SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+ ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -876,13 +846,6 @@
GLOBAL_ATU_OP_BUSY);
}
-/* Must be called with SMI lock held */
-static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
-{
- return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
- GLOBAL2_SCRATCH_BUSY);
-}
-
/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
int regnum)
@@ -1259,7 +1222,13 @@
return 0;
}
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+{
+ return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+ vid & GLOBAL_VTU_VID_MASK);
+}
+
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
@@ -1269,11 +1238,6 @@
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
- vid & GLOBAL_VTU_VID_MASK);
- if (ret < 0)
- return ret;
-
ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
if (ret < 0)
return ret;
@@ -1485,7 +1449,12 @@
int err;
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+
+ err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_vtu_getnext(ds, &vlan);
if (err)
goto unlock;
@@ -1514,7 +1483,11 @@
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+ err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_vtu_getnext(ds, &vlan);
if (err)
goto unlock;
@@ -1549,29 +1522,6 @@
return err;
}
-static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry)
-{
- int err;
-
- do {
- if (vid == 4095)
- return -ENOENT;
-
- err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
- if (err)
- return err;
-
- if (!entry->valid)
- return -ENOENT;
-
- vid = entry->vid;
- } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
- entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
-
- return 0;
-}
-
int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
unsigned long *ports, unsigned long *untagged)
{
@@ -1584,7 +1534,12 @@
return -ENOENT;
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+ err = _mv88e6xxx_vtu_vid_write(ds, *vid);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_vtu_getnext(ds, &next);
+unlock:
mutex_unlock(&ps->smi_mutex);
if (err)
@@ -1732,7 +1687,6 @@
}
static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
- const unsigned char *addr,
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
@@ -1744,10 +1698,6 @@
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_write(ds, addr);
- if (ret < 0)
- return ret;
-
ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
if (ret < 0)
return ret;
@@ -1785,46 +1735,69 @@
return 0;
}
-/* get next entry for port */
-int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, u16 *vid, bool *is_static)
+int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- struct mv88e6xxx_atu_entry next;
- u16 fid = *vid; /* We use one FID per VLAN */
- int ret;
+ struct mv88e6xxx_vtu_stu_entry vlan = {
+ .vid = GLOBAL_VTU_VID_MASK, /* all ones */
+ };
+ int err;
mutex_lock(&ps->smi_mutex);
+ err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+ if (err)
+ goto unlock;
+
do {
- if (is_broadcast_ether_addr(addr)) {
- struct mv88e6xxx_vtu_stu_entry vtu;
+ struct mv88e6xxx_atu_entry addr = {
+ .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
- ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
- if (ret < 0)
- goto unlock;
-
- *vid = vtu.vid;
- fid = vtu.fid;
- }
-
- ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
- if (ret < 0)
+ err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ if (err)
goto unlock;
- ether_addr_copy(addr, next.mac);
+ if (!vlan.valid)
+ break;
- if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
- continue;
- } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+ err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ if (err)
+ goto unlock;
- *is_static = next.state == (is_multicast_ether_addr(addr) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC);
+ do {
+ err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
+ if (err)
+ goto unlock;
+
+ if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
+ bool is_static = addr.state ==
+ (is_multicast_ether_addr(addr.mac) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+ fdb->vid = vlan.vid;
+ ether_addr_copy(fdb->addr, addr.mac);
+ fdb->ndm_state = is_static ? NUD_NOARP :
+ NUD_REACHABLE;
+
+ err = cb(&fdb->obj);
+ if (err)
+ goto unlock;
+ }
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+
unlock:
mutex_unlock(&ps->smi_mutex);
- return ret;
+ return err;
}
static void mv88e6xxx_bridge_work(struct work_struct *work)
@@ -2097,273 +2070,9 @@
return 0;
}
-static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
-{
- struct dsa_switch *ds = s->private;
-
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int reg, port;
-
- seq_puts(s, " GLOBAL GLOBAL2 ");
- for (port = 0 ; port < ps->num_ports; port++)
- seq_printf(s, " %2d ", port);
- seq_puts(s, "\n");
-
- for (reg = 0; reg < 32; reg++) {
- seq_printf(s, "%2x: ", reg);
- seq_printf(s, " %4x %4x ",
- mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
- mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
-
- for (port = 0 ; port < ps->num_ports; port++)
- seq_printf(s, "%4x ",
- mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
- seq_puts(s, "\n");
- }
-
- return 0;
-}
-
-static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mv88e6xxx_regs_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_regs_fops = {
- .open = mv88e6xxx_regs_open,
- .read = seq_read,
- .llseek = no_llseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void mv88e6xxx_atu_show_header(struct seq_file *s)
-{
- seq_puts(s, "DB T/P Vec State Addr\n");
-}
-
-static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
- unsigned char *addr, int data)
-{
- bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
- int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
- GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
- int state = data & GLOBAL_ATU_DATA_STATE_MASK;
-
- seq_printf(s, "%03x %5s %10pb %x %pM\n",
- dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
-}
-
-static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
- int dbnum)
-{
- unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- unsigned char addr[6];
- int ret, data, state;
-
- ret = _mv88e6xxx_atu_mac_write(ds, bcast);
- if (ret < 0)
- return ret;
-
- do {
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
- dbnum);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (ret < 0)
- return ret;
-
- data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
- if (data < 0)
- return data;
-
- state = data & GLOBAL_ATU_DATA_STATE_MASK;
- if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
- break;
- ret = _mv88e6xxx_atu_mac_read(ds, addr);
- if (ret < 0)
- return ret;
- mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
- } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
-
- return 0;
-}
-
-static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
-{
- struct dsa_switch *ds = s->private;
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int dbnum;
-
- mv88e6xxx_atu_show_header(s);
-
- for (dbnum = 0; dbnum < 255; dbnum++) {
- mutex_lock(&ps->smi_mutex);
- mv88e6xxx_atu_show_db(s, ds, dbnum);
- mutex_unlock(&ps->smi_mutex);
- }
-
- return 0;
-}
-
-static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mv88e6xxx_atu_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_atu_fops = {
- .open = mv88e6xxx_atu_open,
- .read = seq_read,
- .llseek = no_llseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void mv88e6xxx_stats_show_header(struct seq_file *s,
- struct mv88e6xxx_priv_state *ps)
-{
- int port;
-
- seq_puts(s, " Statistic ");
- for (port = 0 ; port < ps->num_ports; port++)
- seq_printf(s, "Port %2d ", port);
- seq_puts(s, "\n");
-}
-
-static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
-{
- struct dsa_switch *ds = s->private;
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
- int port, stat, max_stats;
- uint64_t value;
-
- if (have_sw_in_discards(ds))
- max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
- else
- max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
-
- mv88e6xxx_stats_show_header(s, ps);
-
- mutex_lock(&ps->smi_mutex);
-
- for (stat = 0; stat < max_stats; stat++) {
- seq_printf(s, "%19s: ", stats[stat].string);
- for (port = 0 ; port < ps->num_ports; port++) {
- _mv88e6xxx_stats_snapshot(ds, port);
- value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
- port);
- seq_printf(s, "%8llu ", value);
- }
- seq_puts(s, "\n");
- }
- mutex_unlock(&ps->smi_mutex);
-
- return 0;
-}
-
-static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mv88e6xxx_stats_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_stats_fops = {
- .open = mv88e6xxx_stats_open,
- .read = seq_read,
- .llseek = no_llseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
-{
- struct dsa_switch *ds = s->private;
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int target, ret;
-
- seq_puts(s, "Target Port\n");
-
- mutex_lock(&ps->smi_mutex);
- for (target = 0; target < 32; target++) {
- ret = _mv88e6xxx_reg_write(
- ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
- target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
- if (ret < 0)
- goto out;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
- GLOBAL2_DEVICE_MAPPING);
- seq_printf(s, " %2d %2d\n", target,
- ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
- }
-out:
- mutex_unlock(&ps->smi_mutex);
-
- return 0;
-}
-
-static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_device_map_fops = {
- .open = mv88e6xxx_device_map_open,
- .read = seq_read,
- .llseek = no_llseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
-{
- struct dsa_switch *ds = s->private;
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int reg, ret;
-
- seq_puts(s, "Register Value\n");
-
- mutex_lock(&ps->smi_mutex);
- for (reg = 0; reg < 0x80; reg++) {
- ret = _mv88e6xxx_reg_write(
- ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
- reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
- if (ret < 0)
- goto out;
-
- ret = _mv88e6xxx_scratch_wait(ds);
- if (ret < 0)
- goto out;
-
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
- GLOBAL2_SCRATCH_MISC);
- seq_printf(s, " %2x %2x\n", reg,
- ret & GLOBAL2_SCRATCH_VALUE_MASK);
- }
-out:
- mutex_unlock(&ps->smi_mutex);
-
- return 0;
-}
-
-static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_scratch_fops = {
- .open = mv88e6xxx_scratch_open,
- .read = seq_read,
- .llseek = no_llseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- char *name;
mutex_init(&ps->smi_mutex);
@@ -2371,24 +2080,6 @@
INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
- name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
- ps->dbgfs = debugfs_create_dir(name, NULL);
- kfree(name);
-
- debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
- &mv88e6xxx_regs_fops);
-
- debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
- &mv88e6xxx_atu_fops);
-
- debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
- &mv88e6xxx_stats_fops);
-
- debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
- &mv88e6xxx_device_map_fops);
-
- debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
- &mv88e6xxx_scratch_fops);
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 4ba69f3..6f9ed5d 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -406,8 +406,6 @@
u8 port_state[DSA_MAX_PORTS];
struct work_struct bridge_work;
-
- struct dentry *dbgfs;
};
struct mv88e6xxx_hw_stat {
@@ -474,8 +472,9 @@
struct switchdev_trans *trans);
int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb);
-int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, u16 *vid, bool *is_static);
+int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj));
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int reg, int val);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 815eb94..69fc840 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -147,8 +147,12 @@
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
- dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
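+	/* The dummy device never touches packet data, so it can safely
+	 * advertise every TSO/UFO/GSO variant; mirroring the set into
+	 * hw_features makes the flags toggleable via ethtool, and into
+	 * hw_enc_features enables them for encapsulated traffic too.
+	 */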
+ dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
+ dev->features |= NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_GSO_ENCAP_ALL;
+ dev->hw_features |= dev->features;
+ dev->hw_enc_features |= dev->features;
eth_hw_addr_random(dev);
}
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 48ce83e..8d50314 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -847,21 +847,25 @@
if (ndev->irq == -ENXIO) {
netdev_err(ndev, "No irq resource\n");
ret = ndev->irq;
- goto out;
+ goto out_iounmap;
}
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
- goto out;
+ goto out_iounmap;
}
- clk_prepare_enable(db->clk);
+ ret = clk_prepare_enable(db->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+ goto out_iounmap;
+ }
ret = sunxi_sram_claim(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
- goto out;
+ goto out_clk_disable_unprepare;
}
db->phy_node = of_parse_phandle(np, "phy", 0);
@@ -910,6 +914,10 @@
out_release_sram:
sunxi_sram_release(&pdev->dev);
+out_clk_disable_unprepare:
+ clk_disable_unprepare(db->clk);
+out_iounmap:
+ iounmap(db->membase);
out:
dev_err(db->dev, "not found (%d).\n", ret);
@@ -921,8 +929,12 @@
static int emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_board_info *db = netdev_priv(ndev);
unregister_netdev(ndev);
+ sunxi_sram_release(&pdev->dev);
+ clk_disable_unprepare(db->clk);
+ iounmap(db->membase);
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4551224..112f1bc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1595,7 +1595,7 @@
packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
- dma_wmb();
+ wmb();
ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 49f796a..cff8940 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1811,6 +1811,7 @@
struct netdev_queue *txq;
int processed = 0;
unsigned int tx_packets = 0, tx_bytes = 0;
+ unsigned int cur;
DBGPR("-->xgbe_tx_poll\n");
@@ -1818,10 +1819,11 @@
if (!ring)
return 0;
+ cur = ring->cur;
txq = netdev_get_tx_queue(netdev, channel->queue_index);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
- (ring->dirty != ring->cur)) {
+ (ring->dirty != cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 652f218..33850a0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -461,6 +461,7 @@
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
+ struct device *dev = &pdata->pdev->dev;
u32 value, mc2;
u32 intf_ctl, rgmii;
u32 icm0, icm2;
@@ -490,7 +491,12 @@
default:
ENET_INTERFACE_MODE2_SET(&mc2, 2);
intf_ctl |= ENET_GHD_MODE;
- CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
+
+ if (dev->of_node) {
+ CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
+ CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
+ }
+
xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index ff05bbc..6dee73c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -144,6 +144,7 @@
#define CFG_BYPASS_UNISEC_RX BIT(1)
#define CFG_CLE_BYPASS_EN0 BIT(31)
#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3)
+#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 6b1846d..ce10687 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1118,6 +1118,47 @@
return ret;
}
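+/* Optional DT-tunable RGMII clock delays: the 3-bit mux fields accept
+ * values 0-7; when the property is absent, fall back to the hardware
+ * defaults (4 for TX, 2 for RX) instead of failing the probe.
+ */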
+static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
+
+ ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
+ if (ret) {
+ pdata->tx_delay = 4;
+ return 0;
+ }
+
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid tx-delay specified\n");
+ return -EINVAL;
+ }
+
+ pdata->tx_delay = delay;
+
+ return 0;
+}
+
+static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
+
+ ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
+ if (ret) {
+ pdata->rx_delay = 2;
+ return 0;
+ }
+
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid rx-delay specified\n");
+ return -EINVAL;
+ }
+
+ pdata->rx_delay = delay;
+
+ return 0;
+}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
@@ -1194,6 +1235,14 @@
return -ENODEV;
}
+ ret = xgene_get_tx_delay(pdata);
+ if (ret)
+ return ret;
+
+ ret = xgene_get_rx_delay(pdata);
+ if (ret)
+ return ret;
+
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(dev, "Unable to get ENET Rx IRQ\n");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index ff89a5d..a6e56b8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -184,6 +184,8 @@
u8 bp_bufnum;
u16 ring_num;
u32 mss;
+ u8 tx_delay;
+ u8 rx_delay;
};
struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e930aa9..67a7d52 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -170,4 +170,23 @@
Broadcom BCM7xxx Set Top Box family chipset using an internal
Ethernet switch.
+config BNXT
+ tristate "Broadcom NetXtreme-C/E support"
+ depends on PCI
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+ Ethernet cards. To compile this driver as a module, choose M here:
+ the module will be called bnxt_en. This is recommended.
+
+config BNXT_SRIOV
+ bool "Broadcom NetXtreme-C/E SR-IOV support"
+ depends on BNXT && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input/Output
+ Virtualization (SR-IOV) support in the NetXtreme-C/E products. This
+ allows for virtual function acceleration in virtual environments.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index e2a958a..00584d7 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -12,3 +12,4 @@
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 95af75d..8b1929e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -2048,7 +2048,7 @@
for (i = 0; i < priv->num_ports; i++) {
struct bcm63xx_enetsw_port *port;
- int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+ int val, j, up, advertise, lpa, speed, duplex, media;
int external_phy = bcm_enet_port_is_rgmii(i);
u8 override;
@@ -2091,22 +2091,27 @@
lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
MII_LPA);
- lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
- MII_STAT1000);
-
/* figure out media and duplex from advertise and LPA values */
media = mii_nway_result(lpa & advertise);
duplex = (media & ADVERTISE_FULL) ? 1 : 0;
- if (lpa2 & LPA_1000FULL)
- duplex = 1;
- if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
- speed = 1000;
- else {
- if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
- speed = 100;
- else
- speed = 10;
+ if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+ speed = 100;
+ else
+ speed = 10;
+
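+		/* if the PHY reports extended status, also read the
+		 * 1000BASE-T registers so a negotiated gigabit link
+		 * overrides the 10/100 result computed above
+		 */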
+ if (val & BMSR_ESTATEN) {
+ advertise = bcmenet_sw_mdio_read(priv, external_phy,
+ port->phy_id, MII_CTRL1000);
+
+ lpa = bcmenet_sw_mdio_read(priv, external_phy,
+ port->phy_id, MII_STAT1000);
+
+ if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+ && lpa & (LPA_1000FULL | LPA_1000HALF)) {
+ speed = 1000;
+ duplex = (lpa & LPA_1000FULL);
+ }
}
dev_info(&priv->pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644
index 0000000..97e78e2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644
index 0000000..6c2e0c6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -0,0 +1,5728 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT (5 * HZ)
+
+static const char version[] =
+ "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+ BCM57302,
+ BCM57304,
+ BCM57404,
+ BCM57406,
+ BCM57304_VF,
+ BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+ char *name;
+} board_info[] = {
+ { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+ { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+ { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+ HWRM_FUNC_CFG,
+ HWRM_PORT_PHY_QCFG,
+ HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+ return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
+#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
+
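+/* Completion ring doorbell modes: REARM acks entries and re-enables the
+ * IRQ, DB acks with the IRQ left disabled, and IRQ_DIS only masks the
+ * ring's interrupt without updating the consumer index.
+ */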
+#define BNXT_CP_DB_REARM(db, raw_cons) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons) \
+ writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db) \
+ writel(DB_CP_IRQ_DIS_FLAGS, db)
+
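+/* Available TX descriptors: tx_prod and tx_cons are free-running u16
+ * counters, so masking their difference with the power-of-two ring mask
+ * yields the in-flight BD count even across counter wrap.
+ */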
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+
+ return bp->tx_ring_size -
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
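+/* TX length-hint lookup table, indexed by the packet length in 512-byte
+ * units (len >> 9); how the hardware uses the hint is not spelled out
+ * here, presumably to size its DMA prefetch.
+ */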
+static const u16 bnxt_lhint_arr[] = {
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+ TX_BD_FLAGS_LHINT_512_TO_1023,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct tx_bd *txbd;
+ struct tx_bd_ext *txbd1;
+ struct netdev_queue *txq;
+ int i;
+ dma_addr_t mapping;
+ unsigned int length, pad = 0;
+ u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+ u16 prod, last_frag;
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ i = skb_get_queue_mapping(skb);
+ if (unlikely(i >= bp->tx_nr_rings)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+ prod = txr->tx_prod;
+
+ free_size = bnxt_tx_avail(bp, txr);
+ if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+ }
+
+ length = skb->len;
+ len = skb_headlen(skb);
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd->tx_bd_opaque = prod;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = skb;
+ tx_buf->nr_frags = last_frag;
+
+ vlan_tag_flags = 0;
+ cfa_action = 0;
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+ skb_vlan_tag_get(skb);
+ /* Currently supports 802.1Q and 802.1AD VLAN offloads;
+ * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
+ */
+ if (skb->vlan_proto == htons(ETH_P_8021Q))
+ vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+ }
+
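+	/* Push-mode transmit: when the ring is idle and the packet is small
+	 * enough, the BDs and payload are written straight through the
+	 * doorbell BAR with __iowrite64_copy(), saving the device a DMA
+	 * fetch of the descriptors.
+	 */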
+ if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+ struct tx_push_bd *push = txr->tx_push;
+ struct tx_bd *tx_push = &push->txbd1;
+ struct tx_bd_ext *tx_push1 = &push->txbd2;
+ void *pdata = tx_push1 + 1;
+ int j;
+
+ /* Set COAL_NOW to be ready quickly for the next push */
+ tx_push->tx_bd_len_flags_type =
+ cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+ TX_BD_TYPE_LONG_TX_BD |
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+ TX_BD_FLAGS_COAL_NOW |
+ TX_BD_FLAGS_PACKET_END |
+ (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_push1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ else
+ tx_push1->tx_bd_hsize_lflags = 0;
+
+ tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+ skb_copy_from_linear_data(skb, pdata, len);
+ pdata += len;
+ for (j = 0; j < last_frag; j++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ void *fptr;
+
+ fptr = skb_frag_address_safe(frag);
+ if (!fptr)
+ goto normal_tx;
+
+ memcpy(pdata, fptr, skb_frag_size(frag));
+ pdata += skb_frag_size(frag);
+ }
+
+ memcpy(txbd, tx_push, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ memcpy(txbd, tx_push1, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ push->doorbell =
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ txr->tx_prod = prod;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ __iowrite64_copy(txr->tx_doorbell, push,
+ (length + sizeof(*push) + 8) / 8);
+
+ tx_buf->is_push = 1;
+
+ goto tx_done;
+ }
+
+normal_tx:
+ if (length < BNXT_MIN_PKT_SIZE) {
+ pad = BNXT_MIN_PKT_SIZE - length;
+ if (skb_pad(skb, pad)) {
+ /* SKB already freed. */
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+ length = BNXT_MIN_PKT_SIZE;
+ }
+
+ mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+ dev_kfree_skb_any(skb);
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd1->tx_bd_hsize_lflags = 0;
+ if (skb_is_gso(skb)) {
+ u32 hdr_len;
+
+ if (skb->encapsulation)
+ hdr_len = skb_inner_network_offset(skb) +
+ skb_inner_network_header_len(skb) +
+ inner_tcp_hdrlen(skb);
+ else
+ hdr_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+
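+		/* hdr_len is in bytes while the BD header-size field appears
+		 * to count 16-bit words, hence the shift reduced by one.
+		 */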
+ txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+ TX_BD_FLAGS_T_IPID |
+ (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+ length = skb_shinfo(skb)->gso_size;
+ txbd1->tx_bd_mss = cpu_to_le32(length);
+ length += hdr_len;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ txbd1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ txbd1->tx_bd_mss = 0;
+ }
+
+ length >>= 9;
+ flags |= bnxt_lhint_arr[length];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ for (i = 0; i < last_frag; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_dma_error;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ flags = len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type =
+ cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
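+	/* The doorbell is deliberately written twice below; we read this as
+	 * a guard against a dropped posted write on this hardware.
+	 */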
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+ mmiowb();
+
+ if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
+ }
+ return NETDEV_TX_OK;
+
+tx_dma_error:
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = NULL;
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ prod = NEXT_TX(prod);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX(prod);
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ PCI_DMA_TODEVICE);
+ }
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ int index = bnapi->index;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ u16 cons = txr->tx_cons;
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+ unsigned int tx_bytes = 0;
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int j, last;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = NEXT_TX(cons);
+ skb = tx_buf->skb;
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ tx_buf->is_push = 0;
+ goto next_tx_int;
+ }
+
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ last = tx_buf->nr_frags;
+
+ for (j = 0; j < last; j++) {
+ cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[cons];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[j]),
+ PCI_DMA_TODEVICE);
+ }
+
+next_tx_int:
+ cons = NEXT_TX(cons);
+
+ tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+ txr->tx_cons = cons;
+
+ /* Need to make the tx_cons update visible to bnxt_start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnxt_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ txr->dev_state != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ u8 *data;
+ struct pci_dev *pdev = bp->pdev;
+
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
+ return NULL;
+
+ *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+ if (dma_mapping_error(&pdev->dev, *mapping)) {
+ kfree(data);
+ data = NULL;
+ }
+ return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ u8 *data;
+ dma_addr_t mapping;
+
+ data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
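+/* Recycle an rx buffer in place: hand the still-mapped data buffer from
+ * the consumer slot back to the producer slot so the ring never runs dry
+ * when allocation of a replacement buffer fails.
+ */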
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+ u8 *data)
+{
+ u16 prod = rxr->rx_prod;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *cons_bd, *prod_bd;
+
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ prod_rx_buf->data = data;
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+ prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
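+/* The aggregation ring tracks free software slots with a bitmap because
+ * completions can hand buffers back out of order; the search wraps to
+ * the start of the map once the end is reached.
+ */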
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd =
+ &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+ struct pci_dev *pdev = bp->pdev;
+ struct page *page;
+ dma_addr_t mapping;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+
+ mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+ rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+ rx_agg_buf->page = page;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ struct page *page;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+ /* It is possible for sw_prod to be equal to cons, so
+ * set cons_rx_buf->page to NULL first.
+ */
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+ prod_rx_buf->page = page;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = NEXT_RX_AGG(sw_prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr, u16 cons,
+ u16 prod, u8 *data, dma_addr_t dma_addr,
+ unsigned int len)
+{
+ int err;
+ struct sk_buff *skb;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ kfree(data);
+ return NULL;
+ }
+
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ return skb;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct sk_buff *skb, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons, frag_len;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+ struct page *page;
+ dma_addr_t mapping;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+ skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnxt_alloc_rx_page() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ struct skb_shared_info *shinfo;
+ unsigned int nr_frags;
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = --shinfo->nr_frags;
+ __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+ dev_kfree_skb(skb);
+
+ cons_rx_buf->page = page;
+
+ /* Update prod since possibly some pages have been
+ * allocated already.
+ */
+ rxr->rx_agg_prod = prod;
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ return NULL;
+ }
+
+ dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ skb->data_len += frag_len;
+ skb->len += frag_len;
+ skb->truesize += PAGE_SIZE;
+
+ prod = NEXT_RX_AGG(prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ return skb;
+}
+
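+/* Skip ahead over the expected agg entries and test only the last one;
+ * assuming entries are posted in order, a valid final entry implies the
+ * whole group has landed in the completion ring.
+ */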
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ u16 last;
+ struct rx_agg_cmp *agg;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(*raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
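+/* Copy-break receive: packets up to rx_copy_thresh are duplicated into a
+ * freshly allocated skb so the original DMA buffer stays mapped and can
+ * be recycled on the rx ring.
+ */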
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct pci_dev *pdev = bp->pdev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&bnapi->napi, len);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(&pdev->dev, mapping,
+ bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
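+	/* The rx buffer keeps BNXT_RX_OFFSET bytes of headroom ahead of the
+	 * payload; copying from the buffer start reproduces that layout in
+	 * the skb, whose allocator reserved the same headroom.
+	 */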
+ memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+ dma_sync_single_for_device(&pdev->dev, mapping,
+ bp->rx_copy_thresh,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, len);
+ return skb;
+}
+
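+/* TPA start: the chip has begun aggregating a TCP flow (its form of LRO).
+ * Park the current rx buffer in the per-aggregation context and repopulate
+ * the rx ring slots so ordinary receives continue while the aggregation
+ * is in flight.
+ */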
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ u8 agg_id = TPA_START_AGG_ID(tpa_start);
+ u16 cons, prod;
+ struct bnxt_tpa_info *tpa_info;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ dma_addr_t mapping;
+
+ cons = tpa_start->rx_tpa_start_cmp_opaque;
+ prod = rxr->rx_prod;
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ prod_rx_buf->data = tpa_info->data;
+
+ mapping = tpa_info->mapping;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ tpa_info->data = cons_rx_buf->data;
+ cons_rx_buf->data = NULL;
+ tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+ tpa_info->len =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+ RX_TPA_START_CMP_LEN_SHIFT;
+ if (likely(TPA_START_HASH_VALID(tpa_start))) {
+ u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+ tpa_info->hash_type = PKT_HASH_TYPE_L4;
+ tpa_info->gso_type = SKB_GSO_TCPV4;
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type == 3)
+ tpa_info->gso_type = SKB_GSO_TCPV6;
+ tpa_info->rss_hash =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+ if (netif_msg_rx_err(bp))
+ netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+ rxr->rx_prod = NEXT_RX(prod);
+ cons = NEXT_RX(cons);
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+ rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+ cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u16 cp_cons, u32 agg_bufs)
+{
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
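+/* Rebuild the network/transport header offsets of the aggregated skb and
+ * seed the pseudo-header checksum so tcp_gro_complete() can finish it
+ * like a normal GRO packet, including UDP-encapsulated tunnel flows.
+ */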
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ struct tcphdr *th;
+ int payload_off, tcp_opt_len = 0;
+ int len, nw_off;
+
+ NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (TPA_END_GRO_TS(tpa_end))
+ tcp_opt_len = 12;
+
+ if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+ struct iphdr *iph;
+
+ nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ip_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+ struct ipv6hdr *iph;
+
+ nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ipv6_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ tcp_gro_complete(skb);
+
+ if (nw_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
+
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ struct bnxt_napi *bnapi,
+ u32 *raw_cons,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u8 agg_id = TPA_END_AGG_ID(tpa_end);
+ u8 *data, agg_bufs;
+ u16 cp_cons = RING_CMP(*raw_cons);
+ unsigned int len;
+ struct bnxt_tpa_info *tpa_info;
+ dma_addr_t mapping;
+ struct sk_buff *skb;
+
+ tpa_info = &rxr->rx_tpa[agg_id];
+ data = tpa_info->data;
+ prefetch(data);
+ len = tpa_info->len;
+ mapping = tpa_info->mapping;
+
+ agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *agg_event = true;
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
+ return NULL;
+ }
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ if (!skb) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ } else {
+ u8 *new_data;
+ dma_addr_t new_mapping;
+
+ new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ if (!new_data) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+
+ tpa_info->data = new_data;
+ tpa_info->mapping = new_mapping;
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (!skb) {
+ kfree(data);
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ /* Page reuse already handled by bnxt_rx_pages(). */
+ return NULL;
+ }
+ }
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ netdev_features_t features = skb->dev->features;
+ u16 vlan_proto = tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD)) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ tpa_info->metadata &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+ }
+
+ skb_checksum_none_assert(skb);
+ if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level =
+ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+ }
+
+ if (TPA_END_GRO(tpa_end))
+ skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+ return skb;
+}
+
+/* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+ * -EBUSY - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
+ struct rx_cmp *rxcmp;
+ struct rx_cmp_ext *rxcmp1;
+ u32 tmp_raw_cons = *raw_cons;
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct bnxt_sw_rx_bd *rx_buf;
+ unsigned int len;
+ u8 *data, agg_bufs, cmp_type;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ int rc = 0;
+
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ prod = rxr->rx_prod;
+
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+ bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+ goto next_rx_no_prod;
+
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ (struct rx_tpa_end_cmp *)rxcmp,
+ (struct rx_tpa_end_cmp_ext *)rxcmp1,
+ agg_event);
+
+ if (unlikely(IS_ERR(skb)))
+ return -EBUSY;
+
+ rc = -ENOMEM;
+ if (likely(skb)) {
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+ }
+ goto next_rx_no_prod;
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ prefetch(data);
+
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(cp_cons);
+ *agg_event = true;
+ }
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+ rc = -EIO;
+ goto next_rx;
+ }
+
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (RX_CMP_HASH_VALID(rxcmp)) {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+ netdev_features_t features = skb->dev->features;
+ u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD))
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ meta_data &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ } else {
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+ cpr->rx_l4_csum_errors++;
+ }
+
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+
+next_rx:
+ rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+ struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+
+ /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+ default:
+ netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+ event_id);
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+ u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+ struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+ struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+ (struct hwrm_fwd_req_cmpl *)txcmp;
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_HWRM_DONE:
+ seq_id = le16_to_cpu(h_cmpl->sequence_id);
+ if (seq_id == bp->hwrm_intr_seq_id)
+ bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+ else
+ netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+ vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+ if ((vf_id < bp->pf.first_vf_id) ||
+ (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+ netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+ vf_id);
+ return -EINVAL;
+ }
+
+ set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+ set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ bnxt_async_event_process(bp,
+ (struct hwrm_async_event_cmpl *)txcmp);
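+		/* no break: falls through to default */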
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ u32 raw_cons = cpr->cp_raw_cons;
+ u16 cons = RING_CMP(raw_cons);
+ struct tx_cmp *txcmp;
+
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ return TX_CMP_VALID(txcmp, raw_cons);
+}
+
+#define CAG_LEGACY_INT_STATUS 0x2014
+
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+ u32 int_status;
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+ if (!bnxt_has_work(bp, cpr)) {
+ int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+ /* return if this is a spurious interrupt (not ours) */
+ if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+ return IRQ_NONE;
+ }
+
+ /* disable ring IRQ */
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+ /* Return here if interrupt is shared and is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
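+/* One completion ring carries tx completions, rx completions and HWRM
+ * events; entries are demultiplexed by completion type until the budget
+ * is spent or no valid entries remain.
+ */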
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ u32 cons;
+ int tx_pkts = 0;
+ int rx_pkts = 0;
+ bool rx_event = false;
+ bool agg_event = false;
+ struct tx_cmp *txcmp;
+
+ while (1) {
+ int rc;
+
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons))
+ break;
+
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+ tx_pkts++;
+ /* return full budget so NAPI will complete. */
+ if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ rx_pkts = budget;
+ } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ if (likely(rc >= 0))
+ rx_pkts += rc;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ rx_event = true;
+ } else if (unlikely((TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ bnxt_hwrm_handler(bp, txcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts == budget)
+ break;
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ if (tx_pkts)
+ bnxt_tx_int(bp, bnapi, tx_pkts);
+
+ if (rx_event) {
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ if (agg_event) {
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ }
+ }
+ return rx_pkts;
+}
+
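+/* NAPI poll: keep draining until the budget is spent; only when the ring
+ * is seen empty is NAPI completed and the IRQ re-armed via the doorbell,
+ * which also acks the final consumer position.
+ */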
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int work_done = 0;
+
+ if (!bnxt_lock_napi(bnapi))
+ return budget;
+
+ while (1) {
+ work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+ if (work_done >= budget)
+ break;
+
+ if (!bnxt_has_work(bp, cpr)) {
+ napi_complete(napi);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ break;
+ }
+ }
+ mmiowb();
+ bnxt_unlock_napi(bnapi);
+ return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int rx_work, budget = 4;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ return LL_FLUSH_FAILED;
+
+ if (!bnxt_lock_poll(bnapi))
+ return LL_FLUSH_BUSY;
+
+ rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ bnxt_unlock_poll(bnapi);
+ return rx_work;
+}
+#endif
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i, max_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ for (j = 0; j < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (!skb) {
+ j++;
+ continue;
+ }
+
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ j += 2;
+ continue;
+ }
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ last = tx_buf->nr_frags;
+ j += 2;
+ for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+ tx_buf = &txr->tx_buf_ring[j];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ }
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i, max_idx, max_agg_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+ max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ if (rxr->rx_tpa) {
+ for (j = 0; j < MAX_TPA; j++) {
+ struct bnxt_tpa_info *tpa_info =
+ &rxr->rx_tpa[j];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(
+ &pdev->dev,
+ dma_unmap_addr(tpa_info, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ tpa_info->data = NULL;
+
+ kfree(data);
+ }
+ }
+
+ for (j = 0; j < max_idx; j++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+ u8 *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ rx_buf->data = NULL;
+
+ kfree(data);
+ }
+
+ for (j = 0; j < max_agg_idx; j++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+ &rxr->rx_agg_ring[j];
+ struct page *page = rx_agg_buf->page;
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(rx_agg_buf, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ rx_agg_buf->page = NULL;
+ __clear_bit(j, rxr->rx_agg_bmap);
+
+ __free_page(page);
+ }
+ }
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+ bnxt_free_tx_skbs(bp);
+ bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ if (!ring->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, ring->page_size,
+ ring->pg_arr[i], ring->dma_arr[i]);
+
+ ring->pg_arr[i] = NULL;
+ }
+ if (ring->pg_tbl) {
+ dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+ ring->pg_tbl, ring->pg_tbl_map);
+ ring->pg_tbl = NULL;
+ }
+ if (ring->vmem_size && *ring->vmem) {
+ vfree(*ring->vmem);
+ *ring->vmem = NULL;
+ }
+}
+
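+/* Rings are built from one or more DMA-coherent pages; a multi-page ring
+ * additionally gets a page table of DMA addresses (8 bytes per page) for
+ * the chip to walk, plus optional vmalloc'ed software state.
+ */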
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (ring->nr_pages > 1) {
+ ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ ring->nr_pages * 8,
+ &ring->pg_tbl_map,
+ GFP_KERNEL);
+ if (!ring->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ ring->page_size,
+ &ring->dma_arr[i],
+ GFP_KERNEL);
+ if (!ring->pg_arr[i])
+ return -ENOMEM;
+
+ if (ring->nr_pages > 1)
+ ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ }
+
+ if (ring->vmem_size) {
+ *ring->vmem = vzalloc(ring->vmem_size);
+ if (!(*ring->vmem))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, ring);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+ int i, rc, agg_rings = 0, tpa_rings = 0;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ agg_rings = 1;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ tpa_rings = 1;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (agg_rings) {
+ u16 mem_size;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ if (tpa_rings) {
+ rxr->rx_tpa = kcalloc(MAX_TPA,
+ sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+
+ if (txr->tx_push) {
+ dma_free_coherent(&pdev->dev, bp->tx_push_size,
+ txr->tx_push, txr->tx_push_mapping);
+ txr->tx_push = NULL;
+ }
+
+ ring = &txr->tx_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+ int i, j, rc;
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->tx_push_size = 0;
+ if (bp->tx_push_thresh) {
+ int push_size;
+
+ push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+ bp->tx_push_thresh);
+
+ if (push_size > 128) {
+ push_size = 0;
+ bp->tx_push_thresh = 0;
+ }
+
+ bp->tx_push_size = push_size;
+ }
+
+ for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (bp->tx_push_size) {
+ struct tx_bd *txbd;
+ dma_addr_t mapping;
+
+			/* One pre-allocated DMA buffer to back up the
+			 * TX push operation
+			 */
+ txr->tx_push = dma_alloc_coherent(&pdev->dev,
+ bp->tx_push_size,
+ &txr->tx_push_mapping,
+ GFP_KERNEL);
+
+ if (!txr->tx_push)
+ return -ENOMEM;
+
+ txbd = &txr->tx_push->txbd1;
+
+ mapping = txr->tx_push_mapping +
+ sizeof(struct tx_push_bd);
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+ }
+ ring->queue_id = bp->q_info[j].queue_id;
+ if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+ int i, rc;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
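+/* Point each generic ring_struct at the per-ring descriptor page arrays,
+ * DMA address arrays and software ring storage, so that bnxt_alloc_ring()
+ * and bnxt_free_ring() can handle completion, rx, rx aggregation and tx
+ * rings uniformly.
+ */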
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+ ring->nr_pages = bp->cp_nr_pages;
+ ring->page_size = HW_CMPD_RING_SIZE;
+ ring->pg_arr = (void **)cpr->cp_desc_ring;
+ ring->dma_arr = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ ring->nr_pages = bp->rx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_desc_ring;
+ ring->dma_arr = rxr->rx_desc_mapping;
+ ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ ring->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->nr_pages = bp->rx_agg_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ ring->dma_arr = rxr->rx_agg_desc_mapping;
+ ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ ring->vmem = (void **)&rxr->rx_agg_ring;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+ ring->nr_pages = bp->tx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)txr->tx_desc_ring;
+ ring->dma_arr = txr->tx_desc_mapping;
+ ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ ring->vmem = (void **)&txr->tx_buf_ring;
+ }
+}
+
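+/* Stamp every rx buffer descriptor in the ring with the given type/flags
+ * word and record its producer index in rx_bd_opaque, so that completions
+ * can be matched back to the software ring.
+ */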
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+ int i;
+ u32 prod;
+ struct rx_bd **rx_buf_ring;
+
+ rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+ for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ int j;
+ struct rx_bd *rxbd;
+
+ rxbd = rx_buf_ring[i];
+ if (!rxbd)
+ continue;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 prod, type;
+ int i;
+
+ if (!bnapi)
+ return -EINVAL;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_prod;
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_nr, i, bp->rx_ring_size);
+ break;
+ }
+ prod = NEXT_RX(prod);
+ }
+ rxr->rx_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ ring = &rxr->rx_agg_ring_struct;
+
+ type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_agg_prod;
+ for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
+ break;
+ }
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ if (rxr->rx_tpa) {
+ u8 *data;
+ dma_addr_t mapping;
+
+ for (i = 0; i < MAX_TPA; i++) {
+ data = __bnxt_alloc_rx_data(bp, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+ } else {
+ netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ rc = bnxt_init_one_rx_ring(bp, i);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+ u16 i;
+
+ bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+ MAX_SKB_FRAGS + 1);
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+ kfree(bp->grp_info);
+ bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+ int i;
+
+ if (irq_re_init) {
+ bp->grp_info = kcalloc(bp->cp_nr_rings,
+ sizeof(struct bnxt_ring_grp_info),
+ GFP_KERNEL);
+ if (!bp->grp_info)
+ return -ENOMEM;
+ }
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (irq_re_init)
+ bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+ kfree(bp->vnic_info);
+ bp->vnic_info = NULL;
+ bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+ int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->flags & BNXT_FLAG_RFS)
+ num_vnics += bp->rx_nr_rings;
+#endif
+
+ bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+ GFP_KERNEL);
+ if (!bp->vnic_info)
+ return -ENOMEM;
+
+ bp->nr_vnics = num_vnics;
+ return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+ if (bp->vnic_info[i].rss_hash_key) {
+ if (i == 0)
+ prandom_bytes(vnic->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ else
+ memcpy(vnic->rss_hash_key,
+ bp->vnic_info[0].rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+}
+
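+/* Return the number of ring pages, rounded up to a power of 2, such that
+ * the resulting capacity strictly exceeds ring_size.  For example, with a
+ * hypothetical 64 descriptors per page, ring_size 100 yields 2 pages and
+ * ring_size 128 yields 4 pages (2 pages would leave no spare slot).
+ */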
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+ int pages;
+
+ pages = ring_size / desc_per_pg;
+
+ if (!pages)
+ return 1;
+
+ pages++;
+
+ while (pages & (pages - 1))
+ pages++;
+
+ return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->dev->features & NETIF_F_LRO)
+ bp->flags |= BNXT_FLAG_LRO;
+ if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+ u32 ring_size, rx_size, rx_space;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+ ring_size = bp->rx_ring_size;
+ bp->rx_agg_ring_size = 0;
+ bp->rx_agg_nr_pages = 0;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ agg_factor = 4;
+
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bp->flags |= BNXT_FLAG_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ if (agg_ring_size) {
+ bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+ RX_DESC_CNT);
+ if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bp->rx_agg_ring_size = agg_ring_size;
+ bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bp->rx_buf_use_size = rx_size;
+ bp->rx_buf_size = rx_space;
+
+ bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+ bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bp->tx_ring_size;
+ bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+ bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
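+	/* Size the completion ring for the worst case: budget
+	 * (2 + agg_factor) completion entries per rx descriptor and one
+	 * per tx descriptor.
+	 */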
+ ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ bp->cp_ring_size = ring_size;
+
+ bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+ if (bp->cp_nr_pages > MAX_CP_PAGES) {
+ bp->cp_nr_pages = MAX_CP_PAGES;
+ bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+ ring_size, bp->cp_ring_size);
+ }
+ bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+ bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->vnic_info)
+ return;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ kfree(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+ int i, rc = 0, size;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+ int max_rings;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+ int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ if (mem_size > 0) {
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(&pdev->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+ if (!vnic->fw_grp_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Allocate rss table and hash key */
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+out:
+ return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+
+ bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_dbg_resp_addr) {
+ dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+ bp->hwrm_dbg_resp_addr,
+ bp->hwrm_dbg_resp_dma_addr);
+
+ bp->hwrm_dbg_resp_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &bp->hwrm_cmd_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_cmd_resp_addr)
+ return -ENOMEM;
+ bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+ HWRM_DBG_REG_BUF_SIZE,
+ &bp->hwrm_dbg_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_dbg_resp_addr)
+		netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
+
+ return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats) {
+ dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+ cpr->hw_stats_map);
+ cpr->hw_stats = NULL;
+ }
+ }
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+ &cpr->hw_stats_map,
+ GFP_KERNEL);
+ if (!cpr->hw_stats)
+ return -ENOMEM;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ cpr->cp_raw_cons = 0;
+
+ txr = &bnapi->tx_ring;
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+
+ rxr = &bnapi->rx_ring;
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ }
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i;
+
+	/* We are under rtnl_lock and all our NAPIs have been disabled,
+	 * so it is safe to delete the hash table.
+	 */
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_del(&fltr->hash);
+ kfree(fltr);
+ }
+ }
+ if (irq_reinit) {
+ kfree(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
+ }
+ bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return 0;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+ bp->ntp_fltr_count = 0;
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long), GFP_KERNEL);
+
+ if (!bp->ntp_fltr_bmap)
+ rc = -ENOMEM;
+
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_free_vnic_attributes(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+ bnxt_free_cp_rings(bp);
+ bnxt_free_ntp_fltrs(bp, irq_re_init);
+ if (irq_re_init) {
+ bnxt_free_stats(bp);
+ bnxt_free_ring_grps(bp);
+ bnxt_free_vnics(bp);
+ kfree(bp->bnapi);
+ bp->bnapi = NULL;
+ } else {
+ bnxt_clear_ring_indices(bp);
+ }
+}
+
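+/* Allocate all per-device ring state.  On any failure, the partially
+ * allocated state is torn down through bnxt_free_mem(), which is the
+ * exact inverse of this function.
+ */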
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+ int i, rc, size, arr_size;
+ void *bnapi;
+
+ if (irq_re_init) {
+ /* Allocate bnapi mem pointer array and mem block for
+ * all queues
+ */
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+ bp->cp_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+ bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return -ENOMEM;
+
+ bp->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+ bp->bnapi[i] = bnapi;
+ bp->bnapi[i]->index = i;
+ bp->bnapi[i]->bp = bp;
+ }
+
+ rc = bnxt_alloc_stats(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_ntp_fltrs(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnics(bp);
+ if (rc)
+ goto alloc_mem_err;
+ }
+
+ bnxt_init_ring_struct(bp);
+
+ rc = bnxt_alloc_rx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_tx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_cp_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ rc = bnxt_alloc_vnic_attributes(bp);
+ if (rc)
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
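+/* Fill the common 16-byte HWRM request header: the request type and
+ * target completion ring in one dword, the target function id in another
+ * (the sequence id is stamped later by _hwrm_send_message()), and the bus
+ * address of the response buffer.
+ */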
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+ u16 cmpl_ring, u16 target_id)
+{
+ struct hwrm_cmd_req_hdr *req = request;
+
+ req->cmpl_ring_req_type =
+ cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+ req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
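+/* Send one HWRM request and wait for its response.  The request is
+ * copied into BAR0 and the doorbell at offset 0x100 is rung; completion
+ * is then detected either through the completion ring interrupt (when a
+ * cmpl_ring was given) or by polling the response length and valid bit
+ * in the DMA response buffer, sleeping 600-800 us per iteration.  The
+ * caller must hold hwrm_cmd_lock; use hwrm_send_message() otherwise.
+ */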
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int i, intr_process, rc;
+ struct hwrm_cmd_req_hdr *req = msg;
+ u32 *data = msg;
+ __le32 *resp_len, *valid;
+ u16 cp_ring_id, len = 0;
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+ memset(resp, 0, PAGE_SIZE);
+ cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+ HWRM_CMPL_RING_MASK) >>
+ HWRM_CMPL_RING_SFT;
+ intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+ /* currently supports only one outstanding message */
+ if (intr_process)
+ bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+ HWRM_SEQ_ID_MASK;
+
+ /* Ring channel doorbell */
+ writel(1, bp->bar0 + 0x100);
+
+ i = 0;
+ if (intr_process) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+ i++ < timeout) {
+ usleep_range(600, 800);
+ }
+
+ if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ req->cmpl_ring_req_type);
+ return -1;
+ }
+ } else {
+ /* Check if response len is updated */
+ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ for (i = 0; i < timeout; i++) {
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+ HWRM_RESP_LEN_SFT;
+ if (len)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, *resp_len);
+ return -1;
+ }
+
+ /* Last word of resp contains valid bit */
+ valid = bp->hwrm_cmd_resp_addr + len - 4;
+ for (i = 0; i < timeout; i++) {
+ if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, len, *valid);
+ return -1;
+ }
+ }
+
+ rc = le16_to_cpu(resp->error_code);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ le16_to_cpu(resp->req_type),
+ le16_to_cpu(resp->seq_id), rc);
+ return rc;
+ }
+ return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+ int i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+	/* TODO: the async event fwd bits are not defined yet; the firmware
+	 * only checks that this field is non-zero to enable async event
+	 * forwarding.
+	 */
+ req.async_event_fwd[0] |= cpu_to_le32(1);
+ req.os_type = cpu_to_le16(1);
+ req.ver_maj = DRV_VER_MAJ;
+ req.ver_min = DRV_VER_MIN;
+ req.ver_upd = DRV_VER_UPD;
+
+ if (BNXT_PF(bp)) {
+ unsigned long vf_req_snif_bmap[4];
+ u32 *data = (u32 *)vf_req_snif_bmap;
+
+		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+ __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+ for (i = 0; i < 8; i++) {
+ req.vf_req_fwd[i] = cpu_to_le32(*data);
+ data++;
+ }
+ req.enables |=
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+ }
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+ u32 rc = 0;
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+ req.tunnel_type = tunnel_type;
+
+ switch (tunnel_type) {
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+ req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+ req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ break;
+ default:
+ break;
+ }
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+ rc);
+ return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+ u8 tunnel_type)
+{
+ u32 rc = 0;
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+ rc);
+ goto err_out;
+ }
+
+	if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+	else if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+ req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ req.mask = cpu_to_le32(vnic->rx_mask);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+ req.ntuple_filter_id = fltr->filter_id;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS \
+ (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
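+/* Install an exact-match (all-ones masks) IPv4 5-tuple filter that
+ * steers the flow to the per-rx-ring vnic (fltr->rxq + 1); this is how
+ * aRFS keeps a flow on the CPU that consumes it.
+ */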
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+ req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+ req.ethertype = htons(ETH_P_IP);
+ memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+ req.ipaddr_type = 4;
+ req.ip_protocol = keys->basic.ip_proto;
+
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+ req.src_port = keys->ports.src;
+ req.src_port_mask = cpu_to_be16(0xffff);
+ req.dst_port = keys->ports.dst;
+ req.dst_port_mask = cpu_to_be16(0xffff);
+
+ req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ fltr->filter_id = resp->ntuple_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+ u8 *mac_addr)
+{
+ u32 rc = 0;
+ struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+ req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+ CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+ req.enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ req.l2_addr_mask[0] = 0xff;
+ req.l2_addr_mask[1] = 0xff;
+ req.l2_addr_mask[2] = 0xff;
+ req.l2_addr_mask[3] = 0xff;
+ req.l2_addr_mask[4] = 0xff;
+ req.l2_addr_mask[5] = 0xff;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+ resp->l2_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+ u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+ int rc = 0;
+
+ /* Any associated ntuple filters will also be cleared by firmware. */
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_of_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ for (j = 0; j < vnic->uc_filter_count; j++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req,
+ HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ vnic->uc_filter_count = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_tpa_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+ if (tpa_flags) {
+ u16 mss = bp->dev->mtu - 40;
+ u32 nsegs, n, segs = 0, flags;
+
+ flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+ if (tpa_flags & BNXT_FLAG_GRO)
+ flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+ req.flags = cpu_to_le32(flags);
+
+ req.enables =
+ cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+		/* The number of segs is in log2 units, and the first packet
+		 * is not included in these units.
+		 */
+ if (mss <= PAGE_SIZE) {
+ n = PAGE_SIZE / mss;
+ nsegs = (MAX_SKB_FRAGS - 1) * n;
+ } else {
+ n = mss / PAGE_SIZE;
+ if (mss & (PAGE_SIZE - 1))
+ n++;
+ nsegs = (MAX_SKB_FRAGS - n) / n;
+ }
+
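+		/* E.g. (hypothetical values): mtu 1500 -> mss 1460, 4K
+		 * pages, MAX_SKB_FRAGS 17: n = 2, nsegs = 32, so
+		 * max_agg_segs below becomes ilog2(32) = 5.
+		 */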
+ segs = ilog2(nsegs);
+ req.max_agg_segs = cpu_to_le16(segs);
+ req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ }
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ u32 i, j, max_rings;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ if (set_rss) {
+ vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+ req.hash_type = cpu_to_le32(vnic->hash_type);
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+ if (j == max_rings)
+ j = 0;
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
+
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ }
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+ req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req.enables =
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ /* thresholds not implemented in firmware yet */
+ req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+ req.rss_cos_lb_ctx_id =
+ cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, i);
+ }
+ bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+ -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+ int grp_idx = 0;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+	/* Only RSS is supported for now.  TBD: COS & LB */
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+ VNIC_CFG_REQ_ENABLES_RSS_RULE);
+ req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ req.cos_rule = cpu_to_le16(0xffff);
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ grp_idx = 0;
+ else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+ grp_idx = vnic_id - 1;
+
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+ req.lb_rule = cpu_to_le16(0xffff);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN);
+
+ if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ u32 rc = 0;
+
+ if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+ req.vnic_id =
+ cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+ bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ }
+ return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+ u16 i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+ u16 end_grp_id)
+{
+ u32 rc = 0, i, j;
+ struct hwrm_vnic_alloc_input req = {0};
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+ netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+ j, (end_grp_id - start_grp_id));
+ break;
+ }
+ bp->vnic_info[vnic_id].fw_grp_ids[j] =
+ bp->grp_info[i].fw_grp_id;
+ }
+
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ if (vnic_id == 0)
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+ u16 i;
+ u32 rc = 0;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct hwrm_ring_grp_alloc_input req = {0};
+ struct hwrm_ring_grp_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+ req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+ req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+ req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+ u16 i;
+ u32 rc = 0;
+ struct hwrm_ring_grp_free_input req = {0};
+
+ if (!bp->grp_info)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+ continue;
+ req.ring_group_id =
+ cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, u32 map_index,
+ u32 stats_ctx_id)
+{
+ int rc = 0, err = 0;
+ struct hwrm_ring_alloc_input req = {0};
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 ring_id;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+ req.enables = 0;
+ if (ring->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ /* Page size is in log2 units */
+ req.page_size = BNXT_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ } else {
+ req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ }
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ req.cmpl_ring_id =
+ cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+ req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+ req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+ req.queue_id = cpu_to_le16(ring->queue_id);
+ break;
+ case HWRM_RING_ALLOC_RX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+ ring_type);
+ return -1;
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ err = le16_to_cpu(resp->error_code);
+ ring_id = le16_to_cpu(resp->ring_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || err) {
+		switch (ring_type) {
+		case HWRM_RING_ALLOC_CMPL:
+			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case HWRM_RING_ALLOC_RX:
+		case HWRM_RING_ALLOC_AGG:
+			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case HWRM_RING_ALLOC_TX:
+			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		default:
+			netdev_err(bp->dev, "Invalid ring\n");
+			return -1;
+		}
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
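+/* Allocate firmware rings in dependency order: completion rings first
+ * (the tx rings reference them), then tx, rx and rx aggregation rings.
+ * Each ring i is bound to the doorbell at bar1 + i * 0x80; aggregation
+ * rings use the doorbell slots after the rx rings.
+ */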
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_CMPL, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_TX, i,
+ fw_stats_ctx);
+ if (rc)
+ goto err_out;
+ txr->tx_doorbell = bp->bar1 + i * 0x80;
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_RX, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ rxr->rx_doorbell = bp->bar1 + i * 0x80;
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_AGG,
+ bp->rx_nr_rings + i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_agg_doorbell =
+ bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+err_out:
+ return rc;
+}
+
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ int rc;
+ struct hwrm_ring_free_input req = {0};
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 error_code;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+ req.ring_type = ring_type;
+ req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ error_code = le16_to_cpu(resp->error_code);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || error_code) {
+ switch (ring_type) {
+ case RING_FREE_REQ_RING_TYPE_CMPL:
+ netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_RX:
+ netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_TX:
+ netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+ rc);
+ return rc;
+ default:
+ netdev_err(bp->dev, "Invalid ring\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+ int i, rc = 0;
+
+ if (!bp->bnapi)
+ return 0;
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_agg_nr_pages) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ return rc;
+}
+
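+/* Program interrupt coalescing on every completion ring: the driver's
+ * coal_ticks/coal_bufs settings are translated into HWRM aggregation
+ * parameters, with the buffer counts clamped to the 1..63 range.
+ */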
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ u16 max_buf, max_buf_irq;
+ u16 buf_tmr, buf_tmr_irq;
+ u32 flags;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+
+ /* Each rx completion (2 records) should be DMAed immediately */
+ max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+ /* max_buf must not be zero */
+ max_buf = clamp_t(u16, max_buf, 1, 63);
+ max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+ buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+ buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+ /* RING_IDLE generates more IRQs for lower latency. Enable it only
+ * if coal_ticks is less than 25 us.
+ */
+ if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+ req.flags = cpu_to_le16(flags);
+ req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+ req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+ req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+ req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+ req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+ req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+ req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_free_input req = {0};
+
+ if (!bp->bnapi)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_alloc_input req = {0};
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+ req.update_period_ms = cpu_to_le32(1000);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ pf->max_pf_tx_rings = pf->max_tx_rings;
+ pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ pf->max_pf_rx_rings = pf->max_rx_rings;
+ pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ pf->max_vnics = le16_to_cpu(resp->max_vnics);
+ pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+ pf->max_vfs = le16_to_cpu(resp->max_vfs);
+ pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->fw_fid = le16_to_cpu(resp->fid);
+ memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!is_valid_ether_addr(vf->mac_addr))
+ random_ether_addr(vf->mac_addr);
+
+ vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ vf->max_vnics = le16_to_cpu(resp->max_vnics);
+ vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+#endif
+ }
+
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+ struct hwrm_func_reset_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+ req.enables = 0;
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_queue_qportcfg_input req = {0};
+ struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 i, *qptr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bp->max_tc = resp->max_configurable_queues;
+ if (bp->max_tc > BNXT_MAX_QUEUE)
+ bp->max_tc = BNXT_MAX_QUEUE;
+
+ qptr = &resp->queue_id0;
+ for (i = 0; i < bp->max_tc; i++) {
+ bp->q_info[i].queue_id = *qptr++;
+ bp->q_info[i].queue_profile = *qptr++;
+ }
+
+qportcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_ver_get_input req = {0};
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+ req.hwrm_intf_min != resp->hwrm_intf_min ||
+ req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+ netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+ resp->hwrm_intf_maj, resp->hwrm_intf_min,
+ resp->hwrm_intf_upd, req.hwrm_intf_maj,
+ req.hwrm_intf_min, req.hwrm_intf_upd);
+ netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+ }
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+ resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+ resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ bp->vxlan_port_cnt = 0;
+ if (bp->nge_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+ int rc, i;
+ u32 tpa_flags = 0;
+
+ if (set_tpa)
+ tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d set tpa failure rc: %x\n",
+				   i, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+ bool irq_re_init)
+{
+ if (bp->vnic_info) {
+ bnxt_hwrm_clear_vnic_filter(bp);
+		/* clear all RSS settings before freeing the vnic ctx */
+ bnxt_hwrm_clear_vnic_rss(bp);
+ bnxt_hwrm_vnic_ctx_free(bp);
+		/* before freeing the vnic, undo the vnic tpa settings */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ bnxt_hwrm_vnic_free(bp);
+ }
+ bnxt_hwrm_ring_free(bp, close_path);
+ bnxt_hwrm_ring_grp_free(bp);
+ if (irq_re_init) {
+ bnxt_hwrm_stat_ctx_free(bp);
+ bnxt_hwrm_free_tunnel_ports(bp);
+ }
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+
+ /* allocate context for vnic */
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+ bp->rsscos_nr_ctxs++;
+
+ /* configure default vnic, ring grp */
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ /* Enable RSS hashing on vnic */
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+
+vnic_setup_err:
+ return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 vnic_id = i + 1;
+ u16 ring_id = i;
+
+ if (vnic_id >= bp->nr_vnics)
+ break;
+
+ bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ break;
+ }
+ rc = bnxt_setup_vnic(bp, vnic_id);
+ if (rc)
+ break;
+ }
+ return rc;
+#else
+ return 0;
+#endif
+}
+
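+/* Program the chip through HWRM: allocate stat contexts, rings and ring
+ * groups, set up the default vnic (and RFS vnics when enabled), apply
+ * TPA settings, install the default MAC filter and RX mask, and
+ * configure interrupt coalescing.  Any failure unwinds through
+ * bnxt_hwrm_resource_free().
+ */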
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+ int rc = 0;
+
+ if (irq_re_init) {
+ rc = bnxt_hwrm_stat_ctx_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_ring_grp_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+ goto err_out;
+ }
+
+ /* default vnic 0 */
+ rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_setup_vnic(bp, 0);
+ if (rc)
+ goto err_out;
+
+ if (bp->flags & BNXT_FLAG_RFS) {
+ rc = bnxt_alloc_rfs_vnics(bp);
+ if (rc)
+ goto err_out;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_set_tpa(bp, true);
+ if (rc)
+ goto err_out;
+ }
+
+ if (BNXT_VF(bp))
+ bnxt_update_vf_mac(bp);
+
+ /* Filter for default vnic 0 */
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ goto err_out;
+ }
+ bp->vnic_info[0].uc_filter_count = 1;
+
+ bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ bp->vnic_info[0].rx_mask |=
+ CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_set_coal(bp);
+ if (rc)
+ netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+ rc);
+
+ return 0;
+
+err_out:
+ bnxt_hwrm_resource_free(bp, 0, true);
+
+ return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+ return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_init_rx_rings(bp);
+ bnxt_init_tx_rings(bp);
+ bnxt_init_ring_grps(bp, irq_re_init);
+ bnxt_init_vnics(bp);
+
+ return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ if (rc)
+ return rc;
+
+ rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->rx_nr_rings)
+ dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+ if (!dev->rx_cpu_rmap)
+ rc = -ENOMEM;
+#endif
+
+ return rc;
+}
+
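+/* Request one MSI-X vector per completion ring.  If fewer vectors are
+ * granted, trim the RX/TX ring counts (and the per-TC TX queues) to fit
+ * the vectors actually allocated.
+ */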
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+ struct msix_entry *msix_ent;
+ struct net_device *dev = bp->dev;
+ int i, total_vecs, rc = 0;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ total_vecs = bp->cp_nr_rings;
+
+ msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!msix_ent)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vecs; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+ if (total_vecs < 0) {
+ rc = -ENODEV;
+ goto msix_setup_exit;
+ }
+
+ bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (bp->irq_tbl) {
+ int tcs;
+
+		/* Trim ring counts based on the number of vectors allocated */
+ bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+ bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1) {
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+ if (bp->tx_nr_rings_per_tc == 0) {
+ netdev_reset_tc(dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ } else {
+ int i, off, count;
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+ }
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ snprintf(bp->irq_tbl[i].name, len,
+ "%s-%s-%d", dev->name, "TxRx", i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+ rc = bnxt_set_real_num_queues(bp);
+ if (rc)
+ goto msix_setup_exit;
+ } else {
+ rc = -ENOMEM;
+ goto msix_setup_exit;
+ }
+ bp->flags |= BNXT_FLAG_USING_MSIX;
+ kfree(msix_ent);
+ return 0;
+
+msix_setup_exit:
+ netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ pci_disable_msix(bp->pdev);
+ kfree(msix_ent);
+ return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+ int rc;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+
+ bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (!bp->irq_tbl) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ bp->rx_nr_rings = 1;
+ bp->tx_nr_rings = 1;
+ bp->cp_nr_rings = 1;
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ snprintf(bp->irq_tbl[0].name, len,
+ "%s-%s-%d", bp->dev->name, "TxRx", 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
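+/* Prefer MSI-X; fall back to legacy INTA (a single shared vector with a
+ * single RX/TX/completion ring) if MSI-X is unavailable.
+ */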
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (bp->flags & BNXT_FLAG_MSIX_CAP)
+ rc = bnxt_setup_msix(bp);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* fallback to INTA */
+ rc = bnxt_setup_inta(bp);
+ }
+ return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+ struct bnxt_irq *irq;
+ int i;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+ bp->dev->rx_cpu_rmap = NULL;
+#endif
+ if (!bp->irq_tbl)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ irq = &bp->irq_tbl[i];
+ if (irq->requested)
+ free_irq(irq->vector, bp->bnapi[i]);
+ irq->requested = 0;
+ }
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+ int i, rc = 0;
+ unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ flags = IRQF_SHARED;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap && (i < bp->rx_nr_rings)) {
+ rc = irq_cpu_rmap_add(rmap, irq->vector);
+ if (rc)
+ netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+ i);
+ }
+#endif
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ bp->bnapi[i]);
+ if (rc)
+ break;
+
+ irq->requested = 1;
+ }
+ return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ napi_hash_del(&bnapi->napi);
+ netif_napi_del(&bnapi->napi);
+ }
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add(bp->dev, &bnapi->napi,
+ bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+ } else {
+ bnapi = bp->bnapi[0];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ napi_disable(&bp->bnapi[i]->napi);
+ bnxt_disable_poll(bp->bnapi[i]);
+ }
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnxt_enable_poll(bp->bnapi[i]);
+ napi_enable(&bp->bnapi[i]->napi);
+ }
+}
+
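+/* Mark every TX ring as closing under the TX queue lock so in-flight
+ * xmit calls see the state change, then stop all TX queues and drop the
+ * carrier.
+ */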
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ if (bp->bnapi) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ __netif_tx_lock(txq, smp_processor_id());
+ txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ __netif_tx_unlock(txq);
+ }
+ }
+ /* Stop all TX queues */
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ txr->dev_state = 0;
+ }
+ netif_tx_wake_all_queues(bp->dev);
+ if (bp->link_info.link_up)
+ netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+ if (bp->link_info.link_up) {
+ const char *duplex;
+ const char *flow_ctrl;
+ u16 speed;
+
+ netif_carrier_on(bp->dev);
+ if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+ duplex = "full";
+ else
+ duplex = "half";
+ if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+ flow_ctrl = "ON - receive & transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+ flow_ctrl = "ON - transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+ flow_ctrl = "ON - receive";
+ else
+ flow_ctrl = "none";
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ speed, duplex, flow_ctrl);
+ } else {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ }
+}
+
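+/* Query the PHY state via HWRM_PORT_PHY_QCFG and cache the results in
+ * bp->link_info.  When chng_link_state is true, also update link_up and
+ * report any link state transition.
+ */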
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_input req = {0};
+ struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 link_up = link_info->link_up;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+
+ memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+ link_info->phy_link_status = resp->link;
+ link_info->duplex = resp->duplex;
+ link_info->pause = resp->pause;
+ link_info->auto_mode = resp->auto_mode;
+ link_info->auto_pause_setting = resp->auto_pause;
+ link_info->force_pause_setting = resp->force_pause;
+ link_info->duplex_setting = resp->duplex_setting;
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_speed = le16_to_cpu(resp->link_speed);
+ else
+ link_info->link_speed = 0;
+ link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+ link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+ link_info->phy_ver[0] = resp->phy_maj;
+ link_info->phy_ver[1] = resp->phy_min;
+ link_info->phy_ver[2] = resp->phy_bld;
+ link_info->media_type = resp->media_type;
+ link_info->transceiver = resp->transceiver_type;
+ link_info->phy_addr = resp->phy_addr;
+
+ /* TODO: need to add more logic to report VF link */
+ if (chng_link_state) {
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_up = 1;
+ else
+ link_info->link_up = 0;
+ if (link_up != link_info->link_up)
+ bnxt_report_link(bp);
+ } else {
+		/* always report link down if not asked to update link state */
+ link_info->link_up = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+ if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ } else {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ }
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ u8 autoneg = bp->link_info.autoneg;
+ u16 fw_link_speed = bp->link_info.req_link_speed;
+ u32 advertising = bp->link_info.advertising;
+
+ if (autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |=
+ PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+ req->flags |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ } else {
+ req->force_link_speed = cpu_to_le16(fw_link_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ }
+
+ /* currently don't support half duplex */
+ req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+	/* tell the ChiMP firmware that the setting takes effect immediately */
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+ bp->link_info.force_link_chng)
+ bnxt_hwrm_set_link_common(bp, &req);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+		/* since changing the pause setting doesn't trigger any link
+		 * change event, the driver needs to update the current pause
+		 * result upon a successful return of the phy_cfg command
+		 */
+ bp->link_info.pause =
+ bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+ bp->link_info.auto_pause_setting = 0;
+ if (!bp->link_info.force_link_chng)
+ bnxt_report_link(bp);
+ }
+ bp->link_info.force_link_chng = false;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ if (set_pause)
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ bnxt_hwrm_set_link_common(bp, &req);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
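+/* Compare the requested speed/duplex/flow-control settings against what
+ * the firmware reports and reprogram the PHY only when they differ.
+ */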
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+ int rc;
+ bool update_link = false;
+ bool update_pause = false;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_update_link(bp, true);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+ if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->force_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (link_info->req_duplex != link_info->duplex_setting)
+ update_link = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ update_link = true;
+ if (link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ } else {
+ if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+ update_link = true;
+ if (link_info->advertising != link_info->auto_link_speeds)
+ update_link = true;
+ if (link_info->req_link_speed != link_info->auto_link_speed)
+ update_link = true;
+ }
+
+ if (update_link)
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ else if (update_pause)
+ rc = bnxt_hwrm_set_pause(bp);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
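+/* Core open path: set up interrupts (on IRQ re-init), allocate memory,
+ * NAPI and IRQs, initialize the rings and the chip, optionally update
+ * the PHY setting, register tunnel ports, and finally enable interrupts
+ * and the TX queues.
+ */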
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ netif_carrier_off(bp->dev);
+ if (irq_re_init) {
+ rc = bnxt_setup_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+ rc);
+ return rc;
+ }
+ }
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* disable RFS if falling back to INTA */
+ bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_alloc_mem(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto open_err_free_mem;
+ }
+
+ if (irq_re_init) {
+ bnxt_init_napi(bp);
+ rc = bnxt_request_irq(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+ goto open_err;
+ }
+ }
+
+ bnxt_enable_napi(bp);
+
+ rc = bnxt_init_nic(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto open_err;
+ }
+
+ if (link_re_init) {
+ rc = bnxt_update_phy_setting(bp);
+ if (rc)
+ goto open_err;
+ }
+
+ if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+ vxlan_get_rx_port(bp->dev);
+#endif
+ if (!bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, htons(0x17c1),
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+ bp->nge_port_cnt = 1;
+ }
+
+ bp->state = BNXT_STATE_OPEN;
+ bnxt_enable_int(bp);
+ /* Enable TX queues */
+ bnxt_tx_enable(bp);
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+ return 0;
+
+open_err:
+ bnxt_disable_napi(bp);
+ bnxt_del_napi(bp);
+
+open_err_free_mem:
+ bnxt_free_skbs(bp);
+ bnxt_free_irq(bp);
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+ dev_close(bp->dev);
+ }
+ return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -1;
+ return rc;
+ }
+ return __bnxt_open_nic(bp, true, true);
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+ if (!netif_running(bp->dev))
+ return;
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
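+/* Core close path: wait for any pending SRIOV configuration, stop TX,
+ * cancel the slow-path task, flush the rings via HWRM, then disable
+ * NAPI and interrupts and free SKBs (plus IRQs, NAPI and memory when
+ * irq_re_init is set).
+ */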
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+	/* Change device state to avoid TX queue wake-ups */
+ bnxt_tx_disable(bp);
+
+ bp->state = BNXT_STATE_CLOSED;
+ cancel_work_sync(&bp->sp_task);
+
+ /* Flush rings before disabling interrupts */
+ bnxt_shutdown_nic(bp, irq_re_init);
+
+ /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ del_timer_sync(&bp->timer);
+ bnxt_free_skbs(bp);
+
+ if (irq_re_init) {
+ bnxt_free_irq(bp);
+ bnxt_del_napi(bp);
+ }
+ bnxt_free_mem(bp, irq_re_init);
+ return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bnxt_close_nic(bp, true, true);
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ /* fallthru */
+ case SIOCGMIIREG: {
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+ }
+
+ case SIOCSMIIREG:
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ u32 i;
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ if (!bp->bnapi)
+ return stats;
+
+ /* TODO check if we need to synchronize with bnxt_close path */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+ stats->rx_missed_errors +=
+ le64_to_cpu(hw_stats->rx_discard_pkts);
+
+ stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+ stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+ stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ }
+
+ return stats;
+}
+
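+/* Sync the net_device multicast list into vnic 0's mc_list and report
+ * whether it changed.  Falls back to ALL_MCAST (returning false) when
+ * the list exceeds BNXT_MAX_MC_ADDRS.
+ */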
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ u8 *haddr;
+ int mc_count = 0;
+ bool update = false;
+ int off = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNXT_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ u32 mask = vnic->rx_mask;
+ bool mc_update = false;
+ bool uc_update;
+
+ if (!netif_running(dev))
+ return;
+
+ mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+ CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+ /* Only allow PF to be in promiscuous mode */
+ if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ uc_update = bnxt_uc_list_updated(bp);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else {
+ mc_update = bnxt_mc_list_updated(bp, &mask);
+ }
+
+ if (mask != vnic->rx_mask || uc_update || mc_update) {
+ vnic->rx_mask = mask;
+
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnxt_uc_list_updated(bp);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+ -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+ rc);
+ vnic->uc_filter_count = i;
+ }
+ }
+
+skip_uc:
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc)
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+ u32 changes;
+ int rc = 0;
+ bool re_init = false;
+ bool update_tpa = false;
+
+ flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+ if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ flags |= BNXT_FLAG_GRO;
+ if (features & NETIF_F_LRO)
+ flags |= BNXT_FLAG_LRO;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ flags |= BNXT_FLAG_STRIP_VLAN;
+
+ if (features & NETIF_F_NTUPLE)
+ flags |= BNXT_FLAG_RFS;
+
+ changes = flags ^ bp->flags;
+ if (changes & BNXT_FLAG_TPA) {
+ update_tpa = true;
+ if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+ (flags & BNXT_FLAG_TPA) == 0)
+ re_init = true;
+ }
+
+ if (changes & ~BNXT_FLAG_TPA)
+ re_init = true;
+
+ if (flags != bp->flags) {
+ u32 old_flags = bp->flags;
+
+ bp->flags = flags;
+
+ if (!netif_running(dev)) {
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return rc;
+ }
+
+ if (re_init) {
+ bnxt_close_nic(bp, false, false);
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+
+ return bnxt_open_nic(bp, false, false);
+ }
+ if (update_tpa) {
+ rc = bnxt_set_tpa(bp,
+ (flags & BNXT_FLAG_TPA) ?
+ true : false);
+ if (rc)
+ bp->flags = old_flags;
+ }
+ }
+ return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
+ cpr = &bnapi->cp_ring;
+ if (netif_msg_drv(bp)) {
+ netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, txr->tx_ring_struct.fw_ring_id,
+ txr->tx_prod, txr->tx_cons);
+ netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+ i, rxr->rx_ring_struct.fw_ring_id,
+ rxr->rx_prod,
+ rxr->rx_agg_ring_struct.fw_ring_id,
+ rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+ netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, cpr->cp_ring_struct.fw_ring_id,
+ cpr->cp_raw_cons);
+ }
+ }
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+ bnxt_dbg_dump_states(bp);
+ if (netif_running(bp->dev))
+		bnxt_tx_disable(bp); /* prevent another tx timeout */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, bp->bnapi[i]);
+ enable_irq(irq->vector);
+ }
+}
+#endif
+
+static void bnxt_timer(unsigned long data)
+{
+ struct bnxt *bp = (struct bnxt *)data;
+ struct net_device *dev = bp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+ int rc;
+
+ if (bp->state != BNXT_STATE_OPEN)
+ return;
+
+ if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_rx_mode(bp);
+
+ if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_ntp_filters(bp);
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ rc = bnxt_update_link(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->vxlan_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset_task(bp);
+}
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+ int rc;
+ struct bnxt *bp = netdev_priv(dev);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ goto init_err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto init_err_disable;
+ }
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		rc = -EIO;
+		goto init_err_disable;
+	}
+
+ pci_set_master(pdev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+
+ bp->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bp->bar0) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar1 = pci_ioremap_bar(pdev, 2);
+ if (!bp->bar1) {
+ dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar2 = pci_ioremap_bar(pdev, 4);
+ if (!bp->bar2) {
+ dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+ spin_lock_init(&bp->ntp_fltr_lock);
+
+ bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+ bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+ bp->coal_bufs = 20;
+ bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+ bp->coal_bufs_irq = 2;
+
+ init_timer(&bp->timer);
+ bp->timer.data = (unsigned long)bp;
+ bp->timer.function = bnxt_timer;
+ bp->current_interval = BNXT_TIMER_INTERVAL;
+
+ bp->state = BNXT_STATE_CLOSED;
+
+ return 0;
+
+init_err_release:
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+
+ pci_release_regions(pdev);
+
+init_err_disable:
+ pci_disable_device(pdev);
+
+init_err:
+ return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (new_mtu < 60 || new_mtu > 9000)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ dev->mtu = new_mtu;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (tc > bp->max_tc) {
+		netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d.\n",
+ tc, bp->max_tc);
+ return -EINVAL;
+ }
+
+ if (netdev_get_num_tc(dev) == tc)
+ return 0;
+
+ if (tc) {
+ int max_rx_rings, max_tx_rings;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+ return -ENOMEM;
+ }
+
+	/* Need to close the device and redo hw resource allocations */
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, false);
+
+ if (tc) {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+ netdev_set_num_tc(dev, tc);
+ } else {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ netdev_reset_tc(dev);
+ }
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(bp->dev))
+ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+ struct bnxt_ntuple_filter *f2)
+{
+ struct flow_keys *keys1 = &f1->fkeys;
+ struct flow_keys *keys2 = &f2->fkeys;
+
+ if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+ keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+ keys1->ports.ports == keys2->ports.ports &&
+ keys1->basic.ip_proto == keys2->basic.ip_proto &&
+ keys1->basic.n_proto == keys2->basic.n_proto &&
+ ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+ return true;
+
+ return false;
+}
+
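+/* aRFS flow steering callback: dissect the flow, reject anything other
+ * than non-encapsulated IPv4 TCP/UDP, and, if no matching filter
+ * already exists, queue a new ntuple filter for the sp_task to program
+ * into the hardware.
+ */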
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ntuple_filter *fltr, *new_fltr;
+ struct flow_keys *fkeys;
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+ int rc = 0, idx;
+ struct hlist_head *head;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ fkeys = &new_fltr->fkeys;
+ if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+ (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
+ idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (bnxt_fltr_match(fltr, new_fltr)) {
+ rcu_read_unlock();
+ rc = 0;
+ goto err_free;
+ }
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ BNXT_NTP_FLTR_MAX_FLTR, 0);
+ if (new_fltr->sw_id < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ new_fltr->flow_id = flow_id;
+ new_fltr->rxq = rxq_index;
+ hlist_add_head_rcu(&new_fltr->hash, head);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+
+ return new_fltr->sw_id;
+
+err_free:
+ kfree(new_fltr);
+ return rc;
+}
+
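+/* Walk the ntuple filter hash table: allocate HW filters for new
+ * entries and free the ones that the RPS core says have expired.
+ */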
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+ int rc;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ bool del = false;
+
+ if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+ if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ fltr->flow_id,
+ fltr->sw_id)) {
+ bnxt_hwrm_cfa_ntuple_filter_free(bp,
+ fltr);
+ del = true;
+ }
+ } else {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+ fltr);
+ if (rc)
+ del = true;
+ else
+ set_bit(BNXT_FLTR_VALID, &fltr->state);
+ }
+
+ if (del) {
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ hlist_del_rcu(&fltr->hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ synchronize_rcu();
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ kfree(fltr);
+ }
+ }
+ }
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+ return;
+
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ bp->vxlan_port_cnt--;
+
+ if (bp->vxlan_port_cnt == 0) {
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ }
+}
+
+static const struct net_device_ops bnxt_netdev_ops = {
+ .ndo_open = bnxt_open,
+ .ndo_start_xmit = bnxt_start_xmit,
+ .ndo_stop = bnxt_close,
+ .ndo_get_stats64 = bnxt_get_stats64,
+ .ndo_set_rx_mode = bnxt_set_rx_mode,
+ .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnxt_change_mac_addr,
+ .ndo_change_mtu = bnxt_change_mtu,
+ .ndo_fix_features = bnxt_fix_features,
+ .ndo_set_features = bnxt_set_features,
+ .ndo_tx_timeout = bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+ .ndo_get_vf_config = bnxt_get_vf_config,
+ .ndo_set_vf_mac = bnxt_set_vf_mac,
+ .ndo_set_vf_vlan = bnxt_set_vf_vlan,
+ .ndo_set_vf_rate = bnxt_set_vf_bw,
+ .ndo_set_vf_link_state = bnxt_set_vf_link_state,
+ .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnxt_poll_controller,
+#endif
+ .ndo_setup_tc = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = bnxt_rx_flow_steer,
+#endif
+ .ndo_add_vxlan_port = bnxt_add_vxlan_port,
+ .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_PF(bp))
+ bnxt_sriov_disable(bp);
+
+ unregister_netdev(dev);
+ cancel_work_sync(&bp->sp_task);
+ bp->sp_event = 0;
+
+ bnxt_free_hwrm_resources(bp);
+ pci_iounmap(pdev, bp->bar2);
+ pci_iounmap(pdev, bp->bar1);
+ pci_iounmap(pdev, bp->bar0);
+ free_netdev(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ char phy_ver[PHY_VER_STR_LEN];
+
+ rc = bnxt_update_link(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+	/* initialize the ethtool settings copy with NVM settings */
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl = link_info->auto_pause_setting;
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
+ }
+ link_info->req_duplex = link_info->duplex_setting;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ link_info->req_link_speed = link_info->auto_link_speed;
+ else
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->advertising = link_info->auto_link_speeds;
+ snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+ link_info->phy_ver[0],
+ link_info->phy_ver[1],
+ link_info->phy_ver[2]);
+ strcat(bp->fw_ver_str, phy_ver);
+ return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ if (!pdev->msix_cap)
+ return 1;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
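+/* Report the maximum usable RX/TX ring counts, capped by the available
+ * IRQs, completion rings and stat contexts.  RX is halved when
+ * aggregation rings are enabled since each RX ring then also needs an
+ * aggregation ring.
+ */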
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+ int max_rings = 0;
+
+ if (BNXT_PF(bp)) {
+ *max_tx = bp->pf.max_pf_tx_rings;
+ *max_rx = bp->pf.max_pf_rx_rings;
+ max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ *max_tx = bp->vf.max_tx_rings;
+ *max_rx = bp->vf.max_rx_rings;
+ max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+ }
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ *max_rx >>= 1;
+
+ *max_rx = min_t(int, *max_rx, max_rings);
+ *max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int version_printed;
+ struct net_device *dev;
+ struct bnxt *bp;
+ int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+ if (version_printed++ == 0)
+ pr_info("%s", version);
+
+ max_irqs = bnxt_get_max_irq(pdev);
+ dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ if (!dev)
+ return -ENOMEM;
+
+ bp = netdev_priv(dev);
+
+ if (bnxt_vf_pciid(ent->driver_data))
+ bp->flags |= BNXT_FLAG_VF;
+
+ if (pdev->msix_cap) {
+ bp->flags |= BNXT_FLAG_MSIX_CAP;
+ if (BNXT_PF(bp))
+ bp->flags |= BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_init_board(pdev, dev);
+ if (rc < 0)
+ goto init_err_free;
+
+ dev->netdev_ops = &bnxt_netdev_ops;
+ dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+ dev->ethtool_ops = &bnxt_ethtool_ops;
+
+ pci_set_drvdata(pdev, dev);
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+ init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc)
+ goto init_err;
+
+ mutex_init(&bp->hwrm_cmd_lock);
+ bnxt_hwrm_ver_get(bp);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ goto init_err;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ bnxt_set_tpa_flags(bp);
+ bnxt_set_ring_params(bp);
+ dflt_rings = netif_get_num_default_rss_queues();
+ if (BNXT_PF(bp)) {
+ memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ bp->pf.max_irqs = max_irqs;
+ } else {
+#if defined(CONFIG_BNXT_SRIOV)
+ memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ bp->vf.max_irqs = max_irqs;
+#endif
+ }
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+ rc = bnxt_probe_phy(bp);
+ if (rc)
+ goto init_err;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto init_err;
+
+ netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+ return 0;
+
+init_err:
+ pci_iounmap(pdev, bp->bar0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+init_err_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnxt_pci_tbl,
+ .probe = bnxt_init_one,
+ .remove = bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+ .sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 0000000..4f2267c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -0,0 +1,1086 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME "bnxt_en"
+#define DRV_MODULE_VERSION "0.1.24"
+
+#define DRV_VER_MAJ 0
+#define DRV_VER_MIN 1
+#define DRV_VER_UPD 24
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+ __le32 tx_bd_hsize_lflags;
+ #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
+ #define TX_BD_FLAGS_IP_CKSUM (1 << 1)
+ #define TX_BD_FLAGS_NO_CRC (1 << 2)
+ #define TX_BD_FLAGS_STAMP (1 << 3)
+ #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4)
+ #define TX_BD_FLAGS_LSO (1 << 5)
+ #define TX_BD_FLAGS_IPID_FMT (1 << 6)
+ #define TX_BD_FLAGS_T_IPID (1 << 7)
+ #define TX_BD_HSIZE (0xff << 16)
+ #define TX_BD_HSIZE_SHIFT 16
+
+ __le32 tx_bd_mss;
+ __le32 tx_bd_cfa_action;
+ #define TX_BD_CFA_ACTION (0xffff << 16)
+ #define TX_BD_CFA_ACTION_SHIFT 16
+
+ __le32 tx_bd_cfa_meta;
+ #define TX_BD_CFA_META_MASK 0xfffffff
+ #define TX_BD_CFA_META_VID_MASK 0xfff
+ #define TX_BD_CFA_META_PRI_MASK (0xf << 12)
+ #define TX_BD_CFA_META_PRI_SHIFT 12
+ #define TX_BD_CFA_META_TPID_MASK (3 << 16)
+ #define TX_BD_CFA_META_TPID_SHIFT 16
+ #define TX_BD_CFA_META_KEY (0xf << 28)
+ #define TX_BD_CFA_META_KEY_SHIFT 28
+ #define TX_BD_CFA_META_KEY_VLAN (1 << 28)
+};
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+
+ __le32 tx_cmp_unsed_3;
+};
+
+struct rx_cmp {
+ __le32 rx_cmp_len_flags_type;
+ #define RX_CMP_CMP_TYPE (0x3f << 0)
+ #define RX_CMP_FLAGS_ERROR (1 << 6)
+ #define RX_CMP_FLAGS_PLACEMENT (7 << 7)
+ #define RX_CMP_FLAGS_RSS_VALID (1 << 10)
+ #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+ #define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
+ #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
+ #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
+ #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
+ #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
+ #define RX_CMP_LEN (0xffff << 16)
+ #define RX_CMP_LEN_SHIFT 16
+
+ u32 rx_cmp_opaque;
+ __le32 rx_cmp_misc_v1;
+ #define RX_CMP_V1 (1 << 0)
+ #define RX_CMP_AGG_BUFS (0x1f << 1)
+ #define RX_CMP_AGG_BUFS_SHIFT 1
+ #define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+
+ __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RX_CMP_HASH_TYPE(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ RX_CMP_RSS_HASH_TYPE_SHIFT)
+
+struct rx_cmp_ext {
+ __le32 rx_cmp_flags2;
+ #define RX_CMP_FLAGS2_IP_CS_CALC 0x1
+ #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
+ __le32 rx_cmp_meta_data;
+ #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
+ #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
+ #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
+ __le32 rx_cmp_cfa_code_errors_v2;
+ #define RX_CMP_V (1 << 0)
+ #define RX_CMPL_ERRORS_MASK (0x7fff << 1)
+ #define RX_CMPL_ERRORS_SFT 1
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4)
+ #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5)
+ #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6)
+ #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7)
+ #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
+ #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
+
+ #define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
+ #define RX_CMPL_CFA_CODE_SFT 16
+
+ __le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS \
+ cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS \
+ (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
+ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+ __le32 rx_agg_cmp_len_flags_type;
+ #define RX_AGG_CMP_TYPE (0x3f << 0)
+ #define RX_AGG_CMP_LEN (0xffff << 16)
+ #define RX_AGG_CMP_LEN_SHIFT 16
+ u32 rx_agg_cmp_opaque;
+ __le32 rx_agg_cmp_v;
+ #define RX_AGG_CMP_V (1 << 0)
+ __le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+ __le32 rx_tpa_start_cmp_len_flags_type;
+ #define RX_TPA_START_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_START_CMP_LEN (0xffff << 16)
+ #define RX_TPA_START_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_start_cmp_opaque;
+ __le32 rx_tpa_start_cmp_misc_v1;
+ #define RX_TPA_START_CMP_V1 (0x1 << 0)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+
+#define TPA_START_AGG_ID(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+ __le32 rx_tpa_start_cmp_flags2;
+ #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
+ #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+
+ __le32 rx_tpa_start_cmp_metadata;
+ __le32 rx_tpa_start_cmp_cfa_code_v2;
+ #define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
+ #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ __le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+ __le32 rx_tpa_end_cmp_len_flags_type;
+ #define RX_TPA_END_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_END_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_END_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_END_CMP_LEN (0xffff << 16)
+ #define RX_TPA_END_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_end_cmp_opaque;
+ __le32 rx_tpa_end_cmp_misc_v1;
+ #define RX_TPA_END_CMP_V1 (0x1 << 0)
+ #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1
+ #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8)
+ #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_end_cmp_tsdelta;
+ #define RX_TPA_END_GRO_TS (0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
+ cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
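A note on the RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO definition above: the two GRO placement encodings are 101b (GRO_JUMBO) and 110b (GRO_HDS), so ANDing them keeps only the bit they share (100b), and TPA_END_GRO() matches either encoding by testing that single bit. A minimal compile-time restatement of the identity (illustrative C11 snippet, not driver code):

/* Illustrative: the AND of the two GRO placements keeps their common bit */
_Static_assert(((0x5 << 7) & (0x6 << 7)) == (0x4 << 7),
	       "ANY_GRO must be the bit common to GRO_JUMBO and GRO_HDS");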
+
+struct rx_tpa_end_cmp_ext {
+ __le32 rx_tpa_end_cmp_dup_acks;
+ #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+
+ __le32 rx_tpa_end_cmp_seg_len;
+ #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
+
+ __le32 rx_tpa_end_cmp_errors_v2;
+ #define RX_TPA_END_CMP_V2 (0x1 << 0)
+ #define RX_TPA_END_CMP_ERRORS (0x7fff << 1)
+ #define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+
+ u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define DB_IDX_MASK 0xffffff
+#define DB_IDX_VALID (0x1 << 26)
+#define DB_IRQ_DIS (0x1 << 27)
+#define DB_KEY_TX (0x0 << 28)
+#define DB_KEY_RX (0x1 << 28)
+#define DB_KEY_CP (0x2 << 28)
+#define DB_KEY_ST (0x3 << 28)
+#define DB_KEY_TX_PUSH (0x4 << 28)
+#define DB_LONG_TX_PUSH (0x2 << 24)
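The doorbell layout above packs a 24-bit index with a key in the top nibble. A minimal sketch of how a producer index would be rung (hypothetical helper, not part of the driver; assumes db points at a mapped doorbell register):

/* Hypothetical sketch: ring the RX doorbell with a new producer index.
 * The key selects the ring type; the low bits carry the index.
 */
static inline void example_db_write_rx(void __iomem *db, u32 prod)
{
	writel(DB_KEY_RX | (prod & DB_IDX_MASK), db);
}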
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
+
+/* The hardware supports certain page sizes. Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT 13
+#else
+#define BNXT_PAGE_SHIFT 16
+#endif
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+
+#define BNXT_MIN_PKT_SIZE 45
+
+#define BNXT_NUM_TESTS(bp) 0
+
+#define BNXT_DEFAULT_RX_RING_SIZE 1023
+#define BNXT_DEFAULT_TX_RING_SIZE 512
+
+#define MAX_TPA 64
+
+#define MAX_RX_PAGES 8
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
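Each descriptor is 16 bytes, so RX_DESC_CNT equals 1 << (BNXT_PAGE_SHIFT - 4) and the shift in RX_RING() is simply a divide by descriptors-per-page: RX_RING() yields the page and RX_IDX() the slot within it. A worked example, assuming 4 KB pages (256 descriptors per page): software index 1000 maps to page 3, slot 232.

/* Illustrative (assumes BNXT_PAGE_SHIFT == 12, so 256 descs per page):
 * 1000 = 3 * 256 + 232, i.e. RX_RING(1000) == 3 and RX_IDX(1000) == 232.
 */
_Static_assert(1000 / 256 == 3 && 1000 % 256 == 232,
	       "index decomposes into page and slot");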
+
+#define TX_CMP_VALID(txcmp, raw_cons) \
+ (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons) \
+ (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons) \
+ (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define TX_CMP_TYPE(txcmp) \
+ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
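The *_CMP_VALID() macros implement a phase-bit scheme: hardware toggles the V bit in each entry every time it wraps the completion ring, and software keeps its expected phase in the unmasked (raw) consumer index, compared through bp->cp_bit. A hedged sketch of the resulting poll idiom (bnxt_handle_cmp() is a hypothetical stand-in; assumes bp and cpr are in scope):

/* Illustrative poll loop over the completion ring */
u32 raw_cons = cpr->cp_raw_cons;

for (;;) {
	u32 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp =
		&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	if (!TX_CMP_VALID(txcmp, raw_cons))
		break;			/* HW has not produced this entry */
	bnxt_handle_cmp(txcmp);		/* hypothetical handler */
	raw_cons = NEXT_RAW_CMP(raw_cons);
}
cpr->cp_raw_cons = raw_cons;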
+
+#define HWRM_CMD_TIMEOUT 500
+#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK 0xffff
+#define HWRM_RESP_LEN_MASK 0xffff0000
+#define HWRM_RESP_LEN_SFT 16
+#define HWRM_RESP_VALID_MASK 0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE 128
+#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
+ BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ u8 is_gso;
+ u8 is_push;
+ unsigned short nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+ u8 *data;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct bnxt_ring_struct {
+ int nr_pages;
+ int page_size;
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t pg_tbl_map;
+
+ int vmem_size;
+ void **vmem;
+
+ u16 fw_ring_id; /* Ring id filled by Chimp FW */
+ u8 queue_id;
+};
+
+struct tx_push_bd {
+ __le32 doorbell;
+ struct tx_bd txbd1;
+ struct tx_bd_ext txbd2;
+};
+
+struct bnxt_tx_ring_info {
+ u16 tx_prod;
+ u16 tx_cons;
+ void __iomem *tx_doorbell;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ struct tx_push_bd *tx_push;
+ dma_addr_t tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+ u32 dev_state;
+
+ struct bnxt_ring_struct tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+ u8 *data;
+ dma_addr_t mapping;
+ u16 len;
+ unsigned short gso_type;
+ u32 flags2;
+ u32 metadata;
+ enum pkt_hash_types hash_type;
+ u32 rss_hash;
+};
+
+struct bnxt_rx_ring_info {
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ void __iomem *rx_doorbell;
+ void __iomem *rx_agg_doorbell;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnxt_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnxt_sw_rx_agg_bd *rx_agg_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnxt_tpa_info *rx_tpa;
+
+ struct bnxt_ring_struct rx_ring_struct;
+ struct bnxt_ring_struct rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+ u32 cp_raw_cons;
+ void __iomem *cp_doorbell;
+
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+
+ dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
+
+ struct ctx_hw_stats *hw_stats;
+ dma_addr_t hw_stats_map;
+ u32 hw_stats_ctx_id;
+ u64 rx_l4_csum_errors;
+
+ struct bnxt_ring_struct cp_ring_struct;
+};
+
+struct bnxt_napi {
+ struct napi_struct napi;
+ struct bnxt *bp;
+
+ int index;
+ struct bnxt_cp_ring_info cp_ring;
+ struct bnxt_rx_ring_info rx_ring;
+ struct bnxt_tx_ring_info tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ atomic_t poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+ BNXT_STATE_IDLE = 0,
+ BNXT_STATE_NAPI,
+ BNXT_STATE_POLL,
+ BNXT_STATE_DISABLE,
+};
+#endif
+
+struct bnxt_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested;
+ char name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+
+#define INVALID_STATS_CTX_ID -1
+
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK 0xffff0000
+#define HWRM_CMPL_RING_SFT 16
+ __le32 cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK 0xffff
+#define HWRM_SEQ_ID_INVALID -1
+#define HWRM_RESP_LEN_OFFSET 4
+#define HWRM_TARGET_FID_MASK 0xffff0000
+#define HWRM_TARGET_FID_SFT 16
+ __le32 target_id_seq_id;
+ __le64 resp_addr;
+};
+
+struct bnxt_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+ u16 fw_vnic_id; /* returned by Chimp during alloc */
+ u16 fw_rss_cos_lb_ctx;
+ u16 fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS 4
+ __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ /* index 0 always dev_addr */
+ u16 uc_filter_count;
+ u8 *uc_list;
+
+ u16 *fw_grp_ids;
+ u16 hash_type;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNXT_VNIC_RSS_FLAG 1
+#define BNXT_VNIC_RFS_FLAG 2
+#define BNXT_VNIC_MCAST_FLAG 4
+#define BNXT_VNIC_UCAST_FLAG 8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings;
+ u16 max_rx_rings;
+ u16 max_l2_ctxs;
+ u16 max_irqs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u16 vlan;
+ u32 flags;
+#define BNXT_VF_QOS 0x1
+#define BNXT_VF_SPOOFCHK 0x2
+#define BNXT_VF_LINK_FORCED 0x4
+#define BNXT_VF_LINK_UP 0x8
+ u32 func_flags; /* func cfg flags */
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+ void *hwrm_cmd_req_addr;
+ dma_addr_t hwrm_cmd_req_dma_addr;
+};
+#endif
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_FIRST_VF_FID 128
+ u32 fw_fid;
+ u8 port_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings; /* HW assigned max tx rings for this PF */
+ u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */
+ u16 max_rx_rings; /* HW assigned max rx rings for this PF */
+ u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */
+ u16 max_irqs;
+ u16 max_l2_ctxs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u32 first_vf_id;
+ u16 active_vfs;
+ u16 max_vfs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+ unsigned long *vf_event_bmap;
+ u16 hwrm_cmd_req_pages;
+ void *hwrm_cmd_req_addr[4];
+ dma_addr_t hwrm_cmd_req_dma_addr[4];
+ struct bnxt_vf_info *vf;
+};
+
+struct bnxt_ntuple_filter {
+ struct hlist_node hash;
+ u8 src_mac_addr[ETH_ALEN];
+ struct flow_keys fkeys;
+ __le64 filter_id;
+ u16 sw_id;
+ u16 rxq;
+ u32 flow_id;
+ unsigned long state;
+#define BNXT_FLTR_VALID 0
+#define BNXT_FLTR_UPDATE 1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED \
+ (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+ u8 media_type;
+ u8 transceiver;
+ u8 phy_addr;
+ u8 phy_link_status;
+#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 wire_speed;
+ u8 loop_back;
+ u8 link_up;
+ u8 duplex;
+#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
+ u8 pause;
+#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \
+ PORT_PHY_QCFG_RESP_PAUSE_TX)
+ u8 auto_pause_setting;
+ u8 force_pause_setting;
+ u8 duplex_setting;
+ u8 auto_mode;
+#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \
+ (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN 3
+ u8 phy_ver[PHY_VER_LEN];
+ u16 link_speed;
+#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+ u16 support_speeds;
+ u16 auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 auto_link_speed;
+ u16 force_link_speed;
+ u32 preemphasis;
+
+ /* copy of requested setting from ethtool cmd */
+ u8 autoneg;
+#define BNXT_AUTONEG_SPEED 1
+#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_duplex;
+ u8 req_flow_ctrl;
+ u16 req_link_speed;
+ u32 advertising;
+ bool force_link_chng;
+ /* a copy of phy_qcfg output used to report link
+ * info to VF
+ */
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE 8
+
+struct bnxt_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnxt {
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *bar2;
+
+ u32 reg_base;
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ atomic_t intr_sem;
+
+ u32 flags;
+ #define BNXT_FLAG_DCB_ENABLED 0x1
+ #define BNXT_FLAG_VF 0x2
+ #define BNXT_FLAG_LRO 0x4
+#ifdef CONFIG_INET
+ #define BNXT_FLAG_GRO 0x8
+#else
+ /* Cannot support hardware GRO if CONFIG_INET is not set */
+ #define BNXT_FLAG_GRO 0x0
+#endif
+ #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+ #define BNXT_FLAG_JUMBO 0x10
+ #define BNXT_FLAG_STRIP_VLAN 0x20
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO)
+ #define BNXT_FLAG_USING_MSIX 0x40
+ #define BNXT_FLAG_MSIX_CAP 0x80
+ #define BNXT_FLAG_RFS 0x100
+ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
+ BNXT_FLAG_RFS | \
+ BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+
+ struct bnxt_napi **bnapi;
+
+ u32 rx_buf_size;
+	u32			rx_buf_use_size;	/* usable size */
+ u32 rx_ring_size;
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ int rx_nr_pages;
+ int rx_agg_nr_pages;
+ int rx_nr_rings;
+ int rsscos_nr_ctxs;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ int tx_nr_pages;
+ int tx_nr_rings;
+ int tx_nr_rings_per_tc;
+
+ int tx_wake_thresh;
+ int tx_push_thresh;
+ int tx_push_size;
+
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ int cp_nr_pages;
+ int cp_nr_rings;
+
+ int num_stat_ctxs;
+ struct bnxt_ring_grp_info *grp_info;
+ struct bnxt_vnic_info *vnic_info;
+ int nr_vnics;
+
+ u8 max_tc;
+ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+
+ unsigned int current_interval;
+#define BNXT_TIMER_INTERVAL (HZ / 2)
+
+ struct timer_list timer;
+
+ int state;
+#define BNXT_STATE_CLOSED 0
+#define BNXT_STATE_OPEN 1
+
+ struct bnxt_irq *irq_tbl;
+ u8 mac_addr[ETH_ALEN];
+
+ u32 msg_enable;
+
+ u16 hwrm_cmd_seq;
+ u32 hwrm_intr_seq_id;
+ void *hwrm_cmd_resp_addr;
+ dma_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_dbg_resp_addr;
+ dma_addr_t hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE 128
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+#define BC_HWRM_STR_LEN 21
+#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+ char fw_ver_str[FW_VER_STR_LEN];
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __le16 vxlan_fw_dst_port_id;
+ u8 nge_port_cnt;
+ __le16 nge_fw_dst_port_id;
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+
+#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
+
+ struct work_struct sp_task;
+ unsigned long sp_event;
+#define BNXT_RX_MASK_SP_EVENT 0
+#define BNXT_RX_NTP_FLTR_SP_EVENT 1
+#define BNXT_LINK_CHNG_SP_EVENT 2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 4
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT 8
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT 16
+#define BNXT_RESET_TASK_SP_EVENT 32
+#define BNXT_RST_RING_SP_EVENT 64
+
+ struct bnxt_pf_info pf;
+#ifdef CONFIG_BNXT_SRIOV
+ int nr_vfs;
+ struct bnxt_vf_info vf;
+ wait_queue_head_t sriov_cfg_wait;
+ bool sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_NTP_FLTR_HASH_SIZE 512
+#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
+ struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+ spinlock_t ntp_fltr_lock; /* for hash table add, del */
+
+ unsigned long *ntp_fltr_bmap;
+ int ntp_fltr_count;
+
+ struct bnxt_link_info link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_NAPI);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_POLL);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+ int old;
+
+ while (1) {
+ old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_DISABLE);
+ if (old == BNXT_STATE_IDLE)
+ break;
+ usleep_range(500, 5000);
+ }
+}
+
+#else
+
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
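Taken together these helpers form a small three-way lock: NAPI and busy-poll each try to move poll_state from IDLE to their own state with a cmpxchg, and bnxt_disable_poll() spins until it can park the state in DISABLE. A hedged sketch of the NAPI-side usage (illustrative only; the budget accounting is elided):

/* Illustrative NAPI handler skeleton, not the driver's actual poll */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi,
					       napi);

	if (!bnxt_lock_napi(bnapi))
		return budget;	/* busy-poll owns the rings; retry later */

	/* ... process completions up to budget ... */

	bnxt_unlock_napi(bnapi);
	return 0;		/* assume all work was done */
}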
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 0000000..45bd628
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(*coal));
+
+ coal->rx_coalesce_usecs =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+ coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+ coal->rx_coalesce_usecs_irq =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+ coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+ return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+ bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+ bp->coal_ticks_irq =
+ BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+ bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_coal(bp);
+
+ return rc;
+}
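The conversion macros imply one hardware coalescing tick is 80 ns (12.5 ticks per microsecond). For example, a requested rx_coalesce_usecs of 100 becomes 1250 ticks and converts back to exactly 100 us:

/* Illustrative round trip of the coalescing conversion */
_Static_assert(BNXT_USEC_TO_COAL_TIMER(100) == 1250, "100 us -> 1250 ticks");
_Static_assert(BNXT_COAL_TIMER_TO_USEC(1250) == 100, "1250 ticks -> 100 us");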
+
+#define BNXT_NUM_STATS 21
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return BNXT_NUM_STATS * bp->cp_nr_rings;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ u32 i, j = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+ u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+ memset(buf, 0, buf_size);
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ int k;
+
+ for (k = 0; k < stat_fields; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j++] = cpr->rx_l4_csum_errors;
+ }
+}
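The per-ring stats block is laid out as the 20 __le64 hardware counters of struct ctx_hw_stats followed by one software counter (rx_l4_csum_errors), which is where BNXT_NUM_STATS = 21 comes from; the string table below must emit names in exactly that order. The invariant, restated as an illustrative compile-time check:

/* Illustrative: 20 hardware fields plus 1 software counter per ring */
_Static_assert(sizeof(struct ctx_hw_stats) / 8 + 1 == BNXT_NUM_STATS,
	       "stat strings must match the per-ring stats layout");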
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 i;
+
+ switch (stringset) {
+ /* The number of strings must match BNXT_NUM_STATS defined above. */
+ case ETH_SS_STATS:
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ sprintf(buf, "[%d]: rx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_events", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_aborts", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+ buf += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+ stringset);
+ break;
+ }
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+ ering->rx_pending = bp->rx_ring_size;
+ ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+ (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ channel->max_rx = max_rx_rings;
+ channel->max_tx = max_tx_rings;
+ channel->max_other = 0;
+ channel->max_combined = 0;
+ channel->rx_count = bp->rx_nr_rings;
+ channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+	int rc = 0;
+
+ if (channel->other_count || channel->combined_count ||
+ !channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ if (channel->rx_count > max_rx_rings ||
+ channel->tx_count > max_tx_rings)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ if (BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * before PF unload
+ */
+ }
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ netdev_err(bp->dev, "Set channel failure rc :%x\n",
+ rc);
+ return rc;
+ }
+ }
+
+ bp->rx_nr_rings = channel->rx_count;
+ bp->tx_nr_rings_per_tc = channel->tx_count;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ if (tcs > 1)
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(dev)) {
+ rc = bnxt_open_nic(bp, true, false);
+ if ((!rc) && BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * to re-enable
+ */
+ }
+ }
+
+ return rc;
+}
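A note on the ring accounting above: the user configures TX rings per traffic class, the total TX ring count is that value times the number of TCs, and completion rings are shared one per ring-pair index, hence cp_nr_rings = max(tx, rx). For example, rx_count = 8 and tx_count = 4 with 2 TCs gives tx_nr_rings = 8 and cp_nr_rings = max(8, 8) = 8.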
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int i, j = 0;
+
+ cmd->data = bp->ntp_fltr_count;
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (j == cmd->rule_cnt)
+ break;
+ rule_locs[j++] = fltr->sw_id;
+ }
+ rcu_read_unlock();
+ if (j == cmd->rule_cnt)
+ break;
+ }
+ cmd->rule_cnt = j;
+ return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_ntuple_filter *fltr;
+ struct flow_keys *fkeys;
+ int i, rc = -EINVAL;
+
+	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ return rc;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->sw_id == fs->location)
+ goto fltr_found;
+ }
+ rcu_read_unlock();
+ }
+ return rc;
+
+fltr_found:
+ fkeys = &fltr->fkeys;
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V4_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V4_FLOW;
+ else
+ goto fltr_err;
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+ fs->ring_cookie = fltr->rxq;
+ rc = 0;
+
+fltr_err:
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->rx_nr_rings;
+ break;
+
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->ntp_fltr_count;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ break;
+
+ case ETHTOOL_GRXCLSRLALL:
+ rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
+ break;
+
+ case ETHTOOL_GRXCLSRULE:
+ rc = bnxt_grxclsrule(bp, cmd);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+ return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+ return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int i = 0;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (indir)
+ for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+ indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+ if (key)
+ memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ info->testinfo_len = BNXT_NUM_TESTS(bp);
+ /* TODO CHIMP_FW: eeprom dump details */
+ info->eedump_len = 0;
+ /* TODO CHIMP FW: reg dump details */
+ info->regdump_len = 0;
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->support_speeds;
+ u32 speed_mask = 0;
+
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= SUPPORTED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= SUPPORTED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= SUPPORTED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= SUPPORTED_10000baseT_Full;
+ /* TODO: support 25GB, 50GB with different cable type */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full;
+
+ return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->auto_link_speeds;
+ u32 speed_mask = 0;
+
+ /* TODO: support 25GB, 40GB, 50GB with different cable type */
+ /* set the advertised speeds */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= ADVERTISED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= ADVERTISED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= ADVERTISED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= ADVERTISED_10000baseT_Full;
+	/* TODO: how to advertise 20, 25, 40, 50GB with different cable types? */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= ADVERTISED_20000baseMLD2_Full |
+ ADVERTISED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_40000baseCR4_Full |
+ ADVERTISED_40000baseSR4_Full |
+ ADVERTISED_40000baseLR4_Full;
+ return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+ switch (fw_link_speed) {
+ case BNXT_LINK_SPEED_100MB:
+ return SPEED_100;
+ case BNXT_LINK_SPEED_1GB:
+ return SPEED_1000;
+ case BNXT_LINK_SPEED_2_5GB:
+ return SPEED_2500;
+ case BNXT_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case BNXT_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case BNXT_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case BNXT_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case BNXT_LINK_SPEED_50GB:
+ return SPEED_50000;
+ default:
+ return SPEED_UNKNOWN;
+ }
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 ethtool_speed;
+
+ cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+ if (link_info->auto_link_speeds)
+ cmd->supported |= SUPPORTED_Autoneg;
+
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ cmd->advertising =
+ bnxt_fw_to_ethtool_advertised_spds(link_info);
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = 0;
+ }
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->advertising |= ADVERTISED_Pause;
+ }
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->force_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->supported |= SUPPORTED_Pause;
+ }
+ }
+
+ cmd->port = PORT_NONE;
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ cmd->port = PORT_TP;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ cmd->port = PORT_DA;
+ else if (link_info->media_type ==
+ PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+ cmd->port = PORT_FIBRE;
+ }
+
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ ethtool_cmd_speed_set(cmd, ethtool_speed);
+ if (link_info->transceiver ==
+ PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = link_info->phy_addr;
+
+ return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+ switch (ethtool_speed) {
+ case SPEED_100:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ case SPEED_1000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ case SPEED_2500:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ case SPEED_10000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ case SPEED_20000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ case SPEED_25000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ case SPEED_40000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ case SPEED_50000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ default:
+ netdev_err(dev, "unsupported speed!\n");
+ break;
+ }
+ return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+ u16 fw_speed_mask = 0;
+
+	/* only support autoneg at speeds 100, 1000, and 10000 */
+ if (advertising & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+ }
+ if (advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+ }
+ if (advertising & ADVERTISED_10000baseT_Full)
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+ return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 speed, fw_advertising = 0;
+ bool set_pause = false;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "Media type doesn't support autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+ ADVERTISED_Autoneg |
+ ADVERTISED_TP |
+ ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause)) {
+ netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+ if (fw_advertising & ~link_info->support_speeds) {
+ netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+ if (!fw_advertising)
+ link_info->advertising = link_info->support_speeds;
+ else
+ link_info->advertising = fw_advertising;
+		/* any change to autoneg will cause a link change; the driver
+		 * should therefore put back the original pause setting when
+		 * autoneg is enabled
+		 */
+ set_pause = true;
+ } else {
+ /* TODO: currently don't support half duplex */
+ if (cmd->duplex == DUPLEX_HALF) {
+ netdev_err(dev, "HALF DUPLEX is not supported!\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+		/* If we received a request for an unknown duplex, assume full */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+ speed = ethtool_cmd_speed(cmd);
+ link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->advertising = 0;
+ }
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+ return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return;
+ epause->autoneg = !!(link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_BOTH);
+ epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+ epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (epause->autoneg) {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+ } else {
+ /* when transition from auto pause to force pause,
+ * force a link change
+ */
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->force_link_chng = true;
+ link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ }
+ if (epause->rx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+ if (epause->tx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_pause(bp);
+ return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* TODO: handle MF, VF, driver close case */
+ return bp->link_info.link_up;
+}
+
+static int bnxt_flash_nvram(struct net_device *dev,
+ u16 dir_type,
+ u16 dir_ordinal,
+ u16 dir_ext,
+ u16 dir_attr,
+ const u8 *data,
+ size_t data_len)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_write_input req = {0};
+ dma_addr_t dma_handle;
+ u8 *kmem;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+ req.dir_type = cpu_to_le16(dir_type);
+ req.dir_ordinal = cpu_to_le16(dir_ordinal);
+ req.dir_ext = cpu_to_le16(dir_ext);
+ req.dir_attr = cpu_to_le16(dir_attr);
+ req.dir_data_length = cpu_to_le32(data_len);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+ GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)data_len);
+ return -ENOMEM;
+ }
+ memcpy(kmem, data, data_len);
+ req.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+ return rc;
+}
+
+static int bnxt_flash_firmware(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ int rc = 0;
+ u16 code_type;
+ u32 stored_crc;
+ u32 calculated_crc;
+ struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+ switch (dir_type) {
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ code_type = CODE_BOOT;
+ break;
+ default:
+ netdev_err(dev, "Unsupported directory entry type: %u\n",
+ dir_type);
+ return -EINVAL;
+ }
+ if (fw_size < sizeof(struct bnxt_fw_header)) {
+ netdev_err(dev, "Invalid firmware file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+ netdev_err(dev, "Invalid firmware signature: %08X\n",
+ le32_to_cpu(header->signature));
+ return -EINVAL;
+ }
+ if (header->code_type != code_type) {
+ netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+ code_type, header->code_type);
+ return -EINVAL;
+ }
+ if (header->device != DEVICE_CUMULUS_FAMILY) {
+ netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+ DEVICE_CUMULUS_FAMILY, header->device);
+ return -EINVAL;
+ }
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+ if (rc == 0) { /* Firmware update successful */
+ /* TODO: Notify processor it needs to reset itself
+ */
+ }
+ return rc;
+}
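The checksum idiom ~crc32(~0, buf, len) is the standard CRC-32 (all-ones seed, final inversion), computed over everything except the trailing four bytes that hold the expected value. A hedged restatement as a standalone helper (illustrative; assumes the image layout is payload followed by a little-endian CRC-32, and that the caller guarantees len >= 4):

/* Illustrative CRC check, mirroring the logic above */
static bool example_fw_crc_ok(const u8 *data, size_t len)
{
	u32 stored = le32_to_cpu(*(const __le32 *)(data + len - 4));

	return ~crc32(~0, data, len - 4) == stored;
}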
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+ u16 dir_type,
+ const char *filename)
+{
+ const struct firmware *fw;
+ int rc;
+
+	if (!bnxt_dir_type_is_executable(dir_type))
+ return -EINVAL;
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "Error %d requesting firmware file: %s\n",
+ rc, filename);
+ return rc;
+ }
+	if (bnxt_dir_type_is_ape_bin_format(dir_type))
+ rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw->data, fw->size);
+ release_firmware(fw);
+ return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+ char *filename)
+{
+ netdev_err(dev, "packages are not yet supported\n");
+ return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+ netdev_err(dev, "flashdev not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+ return bnxt_flash_package_from_file(dev, flash->data);
+
+ return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_get_dir_info_input req = {0};
+ struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ *entries = le32_to_cpu(output->entries);
+ *length = le32_to_cpu(output->entry_length);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+ /* The -1 return value allows the entire 32-bit range of offsets to be
+ * passed via the ethtool command-line utility.
+ */
+ return -1;
+}
+
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u32 dir_entries;
+ u32 entry_length;
+ u8 *buf;
+ size_t buflen;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_get_dir_entries_input req = {0};
+
+ rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ /* Insert 2 bytes of directory info (count and size of entries) */
+ if (len < 2)
+ return -EINVAL;
+
+ *data++ = dir_entries;
+ *data++ = entry_length;
+ len -= 2;
+ memset(data, 0xff, len);
+
+ buflen = dir_entries * entry_length;
+ buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)buflen);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, len > buflen ? buflen : len);
+ dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u8 *buf;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_read_input req = {0};
+
+ buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)length);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ req.dir_idx = cpu_to_le16(index);
+ req.offset = cpu_to_le32(offset);
+ req.len = cpu_to_le32(length);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, length);
+ dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u32 index;
+ u32 offset;
+
+ if (eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
+ index = eeprom->offset >> 24;
+ offset = eeprom->offset & 0xffffff;
+
+ if (index == 0) {
+ netdev_err(dev, "unsupported index value: %d\n", index);
+ return -EINVAL;
+ }
+
+ return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
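The ethtool eeprom offset is overloaded as an address here: the top byte selects an NVRAM directory entry (0 means "return the directory itself", otherwise entry index + 1) and the low 24 bits are the byte offset within that entry. A hypothetical helper showing the packing:

/* Hypothetical: pack a directory index and byte offset into the value
 * userspace would pass as the ethtool eeprom offset.
 */
static u32 example_nvram_eeprom_offset(u8 dir_idx, u32 byte_off)
{
	return ((u32)(dir_idx + 1) << 24) | (byte_off & 0xffffff);
}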
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+ req.dir_idx = cpu_to_le16(index);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 index, dir_op;
+ u16 type, ext, ordinal, attr;
+
+ if (!BNXT_PF(bp)) {
+ netdev_err(dev, "NVM write not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ type = eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = eeprom->magic & 0xff;
+ dir_op = eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (eeprom->offset != ~eeprom->magic)
+ return -EINVAL;
+ return bnxt_erase_nvram_directory(dev, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+	if (bnxt_dir_type_is_executable(type))
+ return -EINVAL;
+ ext = eeprom->magic & 0xffff;
+ ordinal = eeprom->offset >> 16;
+ attr = eeprom->offset & 0xffff;
+
+ return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+ eeprom->len);
+}
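Directory operations are encoded in the magic in a similar way: a type of 0xffff in the top 16 bits marks a directory op, the next byte is the op code (0x0e = erase) and the low byte is the entry index + 1; as a safeguard, erase additionally requires offset == ~magic. For example, erasing directory entry 2 takes magic 0xffff0e03 with offset 0x0000f1fc:

/* Illustrative: the erase safeguard requires offset to be ~magic */
_Static_assert(~0xffff0e03u == 0x0000f1fcu, "offset must be ~magic");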
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+ .get_settings = bnxt_get_settings,
+ .set_settings = bnxt_set_settings,
+ .get_pauseparam = bnxt_get_pauseparam,
+ .set_pauseparam = bnxt_set_pauseparam,
+ .get_drvinfo = bnxt_get_drvinfo,
+ .get_coalesce = bnxt_get_coalesce,
+ .set_coalesce = bnxt_set_coalesce,
+ .get_msglevel = bnxt_get_msglevel,
+ .set_msglevel = bnxt_set_msglevel,
+ .get_sset_count = bnxt_get_sset_count,
+ .get_strings = bnxt_get_strings,
+ .get_ethtool_stats = bnxt_get_ethtool_stats,
+ .set_ringparam = bnxt_set_ringparam,
+ .get_ringparam = bnxt_get_ringparam,
+ .get_channels = bnxt_get_channels,
+ .set_channels = bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+ .get_rxnfc = bnxt_get_rxnfc,
+#endif
+ .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
+ .get_rxfh_key_size = bnxt_get_rxfh_key_size,
+ .get_rxfh = bnxt_get_rxfh,
+ .flash_device = bnxt_flash_device,
+ .get_eeprom_len = bnxt_get_eeprom_len,
+ .get_eeprom = bnxt_get_eeprom,
+ .set_eeprom = bnxt_set_eeprom,
+ .get_link = bnxt_get_link,
+};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 0000000..98fa81e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 0000000..e0aac65
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+ DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
+ DEVICE_5705_FAMILY, /* 1 - Bachelor */
+ DEVICE_SHASTA_FAMILY, /* 2 - 5751 */
+ DEVICE_5706_FAMILY, /* 3 - Teton */
+ DEVICE_5714_FAMILY, /* 4 - Hamilton */
+ DEVICE_STANFORD_FAMILY, /* 5 - 5755 */
+ DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */
+ DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */
+ DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */
+ DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */
+ DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */
+ DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family
+ */
+ DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia)
+ */
+ DEVICE_LOGAN_57767, /* 13 - Logan Client */
+ DEVICE_LOGAN_57787, /* 14 - Logan Consumer */
+ DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled)
+ */
+ DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */
+ DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */
+ DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */
+ DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */
+ MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+ CODE_ASF1, /* 0 - ASF VERSION 1.03 <deprecated> */
+ CODE_ASF2, /* 1 - ASF VERSION 2.00 <deprecated> */
+ CODE_PASSTHRU, /* 2 - PassThru <deprecated> */
+ CODE_PT_SEC, /* 3 - PassThru with security <deprecated> */
+ CODE_UMP, /* 4 - UMP <deprecated> */
+ CODE_BOOT, /* 5 - Bootcode */
+ CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI)
+ * Management firmwares
+ */
+	CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Pass-through firmware */
+ CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmwares
+ */
+	CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxy
+ * Offload firmware
+ */
+ CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */
+ CODE_MUSTANG, /* 11 - I2C Error reporting APE firmwares
+ * <deprecated>
+ */
+ CODE_ARP_BATCH, /* 12 - ARP Batch firmware */
+ CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+ * Management firmware
+ */
+ CODE_APE_DIAG, /* 14 - APE Test Diag firmware */
+ CODE_APE_PATCH, /* 15 - APE Patch firmware */
+ CODE_TANG_PATCH, /* 16 - TANG Patch firmware */
+ CODE_KONG_FW, /* 17 - KONG firmware */
+ CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
+ CODE_BONO_FW, /* 19 - BONO firmware */
+ CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+
+ MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+ MEDIA_COPPER, /* 0 */
+ MEDIA_FIBER, /* 1 */
+ MEDIA_NONE, /* 2 */
+ MEDIA_COPPER_FIBER, /* 3 */
+ MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+	__le32 signature;	/* contains the constant value of
+				 * BNXT_FIRMWARE_BIN_SIGNATURE
+ */
+ u8 flags; /* reserved for ChiMP use */
+ u8 code_type; /* enum SUPPORTED_CODE */
+ u8 device; /* enum SUPPORTED_FAMILY */
+ u8 media; /* enum SUPPORTED_MEDIA */
+ u8 version[16]; /* null-terminated version string
+ * identifying the file; copied from the
+ * binary file's version string
+ */
+ u8 build;
+ u8 revision;
+ u8 minor_ver;
+ u8 major_ver;
+};
+
+#endif
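For illustration only, a minimal sketch of how a loader might validate an image against this header before trusting it; bnxt_check_fw_header() is a hypothetical helper and not part of this patch:

/* Hypothetical sketch, not part of this submission: validate a firmware
 * image against struct bnxt_fw_header using only the definitions above.
 */
static int bnxt_check_fw_header(const void *data, size_t size)
{
	const struct bnxt_fw_header *hdr = data;

	if (size < sizeof(*hdr))
		return -EINVAL;
	if (le32_to_cpu(hdr->signature) != BNXT_FIRMWARE_BIN_SIGNATURE)
		return -EINVAL;
	if (hdr->code_type >= MAX_CODE_TYPE ||
	    hdr->device >= MAX_DEVICE_FAMILY ||
	    hdr->media >= MAX_MEDIA_TYPE)
		return -EINVAL;
	return 0;
}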
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 0000000..70fc825
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
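A note on conventions: packed sub-fields throughout this file are described by paired _MASK/_SFT defines (mask the little-endian word, then shift). A hedged sketch of the idiom, using the eject completion above; eject_cmpl_type() is illustrative only:

/* Illustrative only: extract the 6-bit completion type via the
 * MASK/SFT convention (the shift happens to be 0 for this field).
 */
static inline u16 eject_cmpl_type(const struct eject_cmpl *cmpl)
{
	return (le16_to_cpu(cmpl->type) & EJECT_CMPL_TYPE_MASK) >>
	       EJECT_CMPL_TYPE_SFT;
}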
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define HWRM_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_CMPL_TYPE_SFT 0
+ #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define HWRM_CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused_0;
+ __le32 req_buf_addr_v[2];
+ #define HWRM_FWD_REQ_CMPL_V 0x1UL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define HWRM_FWD_RESP_CMPL_V 0x1UL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+};
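The event-specific records that follow all repeat this 16-byte layout; only the interpretation of event_data1 and event_data2 changes with event_id. A schematic classifier built only from the defines in this file (example_async_event_name() is hypothetical, not driver code):

/* Illustrative only: name an async event completion by its event_id. */
static const char *
example_async_event_name(const struct hwrm_async_event_cmpl *cmpl)
{
	switch (le16_to_cpu(cmpl->event_id)) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		/* bit 0 of event_data1 carries the new link state */
		return (le32_to_cpu(cmpl->event_data1) &
			HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP) ?
		       "link up" : "link down";
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		return "hwrm error";
	default:
		return "unhandled";
	}
}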
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR 0
+#define HWRM_VERSION_MINOR 7
+#define HWRM_VERSION_UPDATE 8
+
+#define HWRM_VERSION_STR "0.7.8"
+/* The following is the signature for an HWRM message field that indicates
+ * "not applicable" (all F's). Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7-bit indirection table index */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
+/* Input (16 bytes) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
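Every HWRM request embeds these five input fields at its start and every response begins with the four output fields; the firmware writes the response's trailing 'valid' byte (HWRM_RESP_VALID_KEY) last, so pollers wait on it. A schematic header fill follows; example_fill_hdr() is hypothetical, and the all-ones sentinels for cmpl_ring/target_id are assumptions, not documented here:

/* Illustrative sketch: frame an HWRM request header. The 0xffff
 * sentinels (assumed: no completion ring / target self) are
 * assumptions, not part of this specification.
 */
static void example_fill_hdr(struct input *req, u16 req_type,
			     u16 seq_id, dma_addr_t resp_dma)
{
	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(0xffff);
	req->seq_id = cpu_to_le16(seq_id);
	req->target_id = cpu_to_le16(0xffff);
	req->resp_addr = cpu_to_le64(resp_dma);
}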
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET (0x0UL)
+ #define HWRM_FUNC_DISABLE (0x10UL)
+ #define HWRM_FUNC_RESET (0x11UL)
+ #define HWRM_FUNC_GETFID (0x12UL)
+ #define HWRM_FUNC_VF_ALLOC (0x13UL)
+ #define HWRM_FUNC_VF_FREE (0x14UL)
+ #define HWRM_FUNC_QCAPS (0x15UL)
+ #define HWRM_FUNC_QCFG (0x16UL)
+ #define HWRM_FUNC_CFG (0x17UL)
+ #define HWRM_FUNC_QSTATS (0x18UL)
+ #define HWRM_FUNC_CLR_STATS (0x19UL)
+ #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
+ #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
+ #define HWRM_FUNC_DRV_RGTR (0x1dUL)
+ #define HWRM_FUNC_DRV_QVER (0x1eUL)
+ #define HWRM_FUNC_BUF_RGTR (0x1fUL)
+ #define HWRM_FUNC_VF_CFG (0x20UL)
+ #define HWRM_PORT_PHY_CFG (0x20UL)
+ #define HWRM_PORT_MAC_CFG (0x21UL)
+ #define HWRM_PORT_ENABLE (0x22UL)
+ #define HWRM_PORT_QSTATS (0x23UL)
+ #define HWRM_PORT_LPBK_QSTATS (0x24UL)
+ #define HWRM_PORT_CLR_STATS (0x25UL)
+ #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
+ #define HWRM_PORT_PHY_QCFG (0x27UL)
+ #define HWRM_PORT_MAC_QCFG (0x28UL)
+ #define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_QUEUE_QPORTCFG (0x30UL)
+ #define HWRM_QUEUE_QCFG (0x31UL)
+ #define HWRM_QUEUE_CFG (0x32UL)
+ #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
+ #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
+ #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
+ #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
+ #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
+ #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
+ #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_VNIC_ALLOC (0x40UL)
+ #define HWRM_VNIC_FREE (0x41UL)
+ #define HWRM_VNIC_CFG (0x42UL)
+ #define HWRM_VNIC_QCFG (0x43UL)
+ #define HWRM_VNIC_TPA_CFG (0x44UL)
+ #define HWRM_VNIC_TPA_QCFG (0x45UL)
+ #define HWRM_VNIC_RSS_CFG (0x46UL)
+ #define HWRM_VNIC_RSS_QCFG (0x47UL)
+ #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
+ #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_RING_ALLOC (0x50UL)
+ #define HWRM_RING_FREE (0x51UL)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
+ #define HWRM_RING_RESET (0x5eUL)
+ #define HWRM_RING_GRP_ALLOC (0x60UL)
+ #define HWRM_RING_GRP_FREE (0x61UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
+ #define HWRM_ARB_GRP_ALLOC (0x80UL)
+ #define HWRM_ARB_GRP_CFG (0x81UL)
+ #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
+ #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
+ #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
+ #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
+ #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
+ #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
+ #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
+ #define HWRM_STAT_CTX_ALLOC (0xb0UL)
+ #define HWRM_STAT_CTX_FREE (0xb1UL)
+ #define HWRM_STAT_CTX_QUERY (0xb2UL)
+ #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
+ #define HWRM_FW_RESET (0xc0UL)
+ #define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_EXEC_FWD_RESP (0xd0UL)
+ #define HWRM_REJECT_FWD_RESP (0xd1UL)
+ #define HWRM_FWD_RESP (0xd2UL)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
+ #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL)
+ #define HWRM_MGMT_L2_FILTER_FREE (0x101UL)
+ #define HWRM_DBG_READ_DIRECT (0xff10UL)
+ #define HWRM_DBG_READ_INDIRECT (0xff11UL)
+ #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
+ #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
+ #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_MODIFY (0xfff4UL)
+ #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
+ #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
+ #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
+ #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
+ #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
+ #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
+ #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
+ #define HWRM_NVM_RAW_DUMP (0xfffcUL)
+ #define HWRM_NVM_READ (0xfffdUL)
+ #define HWRM_NVM_WRITE (0xfffeUL)
+ #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS (0x0UL)
+ #define HWRM_ERR_CODE_FAIL (0x1UL)
+ #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
+ #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
+ #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
+ #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 opaque_2;
+ u8 valid;
+};
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 hwrm_intf_rsvd;
+ u8 hwrm_fw_maj;
+ u8 hwrm_fw_min;
+ u8 hwrm_fw_bld;
+ u8 hwrm_fw_rsvd;
+ u8 ape_fw_maj;
+ u8 ape_fw_min;
+ u8 ape_fw_bld;
+ u8 ape_fw_rsvd;
+ u8 kong_fw_maj;
+ u8 kong_fw_min;
+ u8 kong_fw_bld;
+ u8 kong_fw_rsvd;
+ u8 tang_fw_maj;
+ u8 tang_fw_min;
+ u8 tang_fw_bld;
+ u8 tang_fw_rsvd;
+ u8 bono_fw_maj;
+ u8 bono_fw_min;
+ u8 bono_fw_bld;
+ u8 bono_fw_rsvd;
+ char hwrm_fw_name[16];
+ char ape_fw_name[16];
+ char kong_fw_name[16];
+ char tang_fw_name[16];
+ char bono_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 unused_0;
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
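Since the HWRM_VERSION_* defines above describe the interface these structures were generated from, a driver can compare them against what hwrm_ver_get reports. A hedged sketch (example_fw_intf_newer() is illustrative only):

/* Illustrative only: does firmware report a newer HWRM interface
 * than these headers (HWRM_VERSION_MAJOR/MINOR/UPDATE)?
 */
static bool example_fw_intf_newer(const struct hwrm_ver_get_output *resp)
{
	u32 fw = (resp->hwrm_intf_maj << 16) |
		 (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
	u32 drv = (HWRM_VERSION_MAJOR << 16) |
		  (HWRM_VERSION_MINOR << 8) | HWRM_VERSION_UPDATE;

	return fw > drv;
}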
+
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ __le16 mtu;
+ __le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ u8 perm_mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
+ #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
+ #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ __le32 max_bw;
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 allowed_vlan_pris;
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_2;
+ __le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 timestamp;
+ __le32 unused_2;
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL
+ #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL
+ __le16 fid;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le32 unused_2;
+};
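Note the speed encoding shared by force_link_speed, auto_link_speed and the matching qcfg fields: the value is the link speed in units of 100 Mbps (0x1 = 100 Mb, 0xa = 1 Gb, 0x64 = 10 Gb, 0x1f4 = 50 Gb), presumably what bnxt_fw_to_ethtool_speed() from bnxt_ethtool.h converts for ethtool reporting. A trivial sketch of the rule:

/* Illustrative: HWRM link-speed values are in 100 Mbps units. */
static inline u32 example_fw_speed_to_mbps(u16 fw_speed)
{
	return (u32)fw_speed * 100;
}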
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 duplex;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 duplex_setting;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0)
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ u8 transceiver_type;
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ u8 unused_2;
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 ivlan_pri2cos_map_pri;
+ u8 lcos_map_pri;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 num_blinks;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_buffers_cfg_allowed;
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 valid;
+};
+
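+/* Illustrative sketch (editor's addition): the qportcfg response
+ * packs up to eight (queue_id, service_profile) byte pairs; under
+ * the assumption that the pairs are laid out contiguously, a
+ * caller might walk them as an array rather than by name:
+ *
+ *	u8 *qptr = &resp->queue_id0;
+ *
+ *	for (i = 0; i < resp->max_configurable_queues; i++) {
+ *		id = *qptr++;
+ *		profile = *qptr++;
+ *	}
+ *
+ * Declarations of i, id and profile are elided; treat this as a
+ * sketch of how such generated layouts are commonly consumed,
+ * not as the driver's canonical code.
+ */
+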
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL
+ __le32 queue_id;
+ __le32 reserved;
+ __le32 shared;
+ __le32 xoff;
+ __le32 xon;
+ __le32 full;
+ __le32 notfull;
+ __le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ __le32 enables;
+ u8 port_id;
+ u8 pri0_cos;
+ u8 pri1_cos;
+ u8 pri2_cos;
+ u8 pri3_cos;
+ u8 pri4_cos;
+ u8 pri5_cos;
+ u8 pri6_cos;
+ u8 pri7_cos;
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ __le32 queue_id0_max_bw;
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ __le32 queue_id1_max_bw;
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ __le32 queue_id2_max_bw;
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ __le32 queue_id3_max_bw;
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ __le32 queue_id4_max_bw;
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ __le32 queue_id5_max_bw;
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ __le32 queue_id6_max_bw;
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ __le32 queue_id7_max_bw;
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
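+/* Editor's note (illustrative): cos2bw_cfg carries one bandwidth
+ * record per CoS queue; queue_idN_tsa_assign selects strict
+ * priority (SP) or weighted ETS scheduling for that queue, with
+ * queue_idN_bw_weight meaningful in the ETS case, e.g.:
+ *
+ *	req.enables = cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID);
+ *	req.queue_id0 = id;
+ *	req.queue_id0_tsa_assign = QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS;
+ *	req.queue_id0_bw_weight = weight;
+ *
+ * A hedged sketch; id and weight are placeholder values.
+ */
+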
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ __le32 unused_0;
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
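+/* Illustrative sketch (editor's addition): RSS is configured by
+ * pointing firmware at a ring-group table and a hash key in host
+ * memory and selecting which header tuples feed the hash:
+ *
+ *	req.hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ *				    VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4);
+ *	req.ring_grp_tbl_addr = cpu_to_le64(tbl_map);
+ *	req.hash_key_tbl_addr = cpu_to_le64(key_map);
+ *	req.rss_ctx_idx = cpu_to_le16(ctx_id);
+ *
+ * tbl_map, key_map and ctx_id are hypothetical; both tables are
+ * assumed to be DMA-mapped by the caller.
+ */
+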
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL
+ #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_4;
+ u8 unused_5;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ u8 unused_6;
+ u8 unused_7;
+ __le32 weight;
+ __le32 stat_ctx_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
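+/* Editor's note (illustrative): ring_alloc is shared by the
+ * completion, TX and RX ring flavours; ring_type selects the
+ * flavour and page_tbl_addr points at the DMA-mapped descriptor
+ * page table. A TX ring, for instance, also names the completion
+ * ring that will carry its completions:
+ *
+ *	req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ *	req.page_tbl_addr = cpu_to_le64(pg_tbl_map);
+ *	req.length = cpu_to_le32(ring_entries);
+ *	req.cmpl_ring_id = cpu_to_le16(cp_ring_id);
+ *
+ * The returned ring_id is the handle that the free and reset
+ * requests below take. Placeholder names as before.
+ */
+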
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 input_number;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 arb_grp_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ __le16 tx_ring;
+ __le32 weight;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ u8 t_l2_addr[6];
+ u8 unused_4;
+ u8 unused_5;
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ u8 unused_6;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_7;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ u8 unused_8;
+ __le32 unused_9;
+ __le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
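+/* Illustrative sketch (editor's addition): a minimal unicast RX
+ * MAC filter only needs the address and its mask enabled:
+ *
+ *	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ *	req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ *				  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ *	memcpy(req.l2_addr, dev_mac, ETH_ALEN);
+ *	memset(req.l2_addr_mask, 0xff, ETH_ALEN);
+ *
+ * The returned l2_filter_id is the handle passed to the free and
+ * cfg requests below. dev_mac is a placeholder for the device's
+ * MAC address.
+ */
+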
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL
+ __le64 l2_filter_id;
+ __le32 dst_vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mirroring_flags;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+ __le16 vlan_id;
+ u8 bcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ u8 mcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 encap_record_id;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ipaddr_type;
+ u8 ip_protocol;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 pri_hint;
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
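+/* Editor's note (illustrative): ntuple filters refine an L2
+ * filter by 5-tuple; note the network-byte-order (__be*) address
+ * and port fields, unlike the little-endian control fields. A
+ * TCP/IPv4 steering rule might set:
+ *
+ *	req.l2_filter_id = l2_filter_id;
+ *	req.ethertype = htons(ETH_P_IP);
+ *	req.ip_protocol = IPPROTO_TCP;
+ *	req.src_ipaddr[0] = fkeys->src;
+ *	req.dst_port = fkeys->dst_port;
+ *
+ * with the matching CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_* bits
+ * set in req.enables; fkeys is a hypothetical parsed-flow
+ * structure used only for illustration.
+ */
+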
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+ __le32 unused_0;
+ __le64 ntuple_filter_id;
+ __le32 new_dst_vnic_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
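+/* Illustrative sketch (editor's addition): a stack VXLAN-port
+ * notification maps directly onto this request; the port value
+ * is __be16 and stays big-endian end to end:
+ *
+ *	req.tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ *	req.tunnel_dst_port_val = port;
+ *
+ * The returned tunnel_dst_port_id is the handle that
+ * hwrm_tunnel_dst_port_free later takes back. port is a
+ * placeholder for the __be16 UDP port supplied by the stack.
+ */
+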
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
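+/* Illustrative sketch (editor's addition): a stats context binds
+ * a DMA-coherent counter buffer to a periodic firmware update;
+ * the driver would map the buffer first and then request, e.g.:
+ *
+ *	req.stats_dma_addr = cpu_to_le64(ring_stats_map);
+ *	req.update_period_ms = cpu_to_le32(1000);
+ *
+ * and keep the returned stat_ctx_id for the free, query and
+ * clr_stats requests below. ring_stats_map is a hypothetical
+ * dma_addr_t from dma_alloc_coherent().
+ */
+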
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL
+ u8 l2_address[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_address_mask[6];
+ __le16 ovlan;
+ __le16 ovlan_mask;
+ __le16 ivlan;
+ __le16 ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 action_id;
+ u8 action_bypass;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL
+ u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mgmt_l2_filter_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 mgmt_l2_filter_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le32 dest_addr;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 offset;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
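+/* Editor's note (illustrative): NVM entries are addressed either
+ * by directory index (dir_idx, as in hwrm_nvm_read above and
+ * hwrm_nvm_modify below) or by the (dir_type, dir_ordinal,
+ * dir_ext) tuple, as in hwrm_nvm_write above and
+ * hwrm_nvm_find_dir_entry below; host_src_addr is assumed to be
+ * a DMA-mapped buffer holding dir_data_length bytes of payload.
+ */
+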
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
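+/* Editor's note (illustrative): these forwarding requests appear
+ * to implement PF-mediated VF communication: a VF command arrives
+ * encapsulated in encap_request[], and the PF either asks firmware
+ * to execute it (hwrm_exec_fwd_resp), rejects it
+ * (hwrm_reject_fwd_resp), or sends a prepared response back to
+ * encap_resp_target_id (hwrm_fwd_resp). This summary is an
+ * editorial gloss on the structures above, not generated text.
+ */
+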
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+#endif
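Every HWRM input above starts with the same 16-byte header (req_type,
cmpl_ring, seq_id, target_id, resp_addr) and every output ends with a
valid byte that the firmware writes last. A minimal sketch of issuing
one of these commands, assuming the HWRM_TEMP_MONITOR_QUERY request-type
constant and the bnxt helpers used in bnxt_sriov.c below:

static int bnxt_hwrm_read_temp(struct bnxt *bp, u8 *temp)
{
	struct hwrm_temp_monitor_query_input req = {0};
	struct hwrm_temp_monitor_query_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Fill in the common request header */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*temp = resp->temp;	/* response is DMA'd to hwrm_cmd_resp_addr */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}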
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 0000000..3cf3e1b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+
+#define BNX_DIR_EXT_INACTIVE (1 << 0)
+#define BNX_DIR_EXT_UPDATE (1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+
+#endif /* Don't add anything after this line */
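The dir_ext and dir_attr values above are bit flags meant to be tested
with plain mask operations; a small sketch (the helper name is
illustrative, not part of this file):

/* An NVM directory entry is live only while neither extension bit is
 * set; dir_ext is assumed to come from an NVM directory query.
 */
static bool bnxt_dir_entry_is_live(u16 dir_ext)
{
	return !(dir_ext & (BNX_DIR_EXT_INACTIVE | BNX_DIR_EXT_UPDATE));
}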
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644
index 0000000..60989e7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -0,0 +1,816 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+ if (bp->state != BNXT_STATE_OPEN) {
+ netdev_err(bp->dev, "vf ndo called though PF is down\n");
+ return -EINVAL;
+ }
+ if (!bp->pf.active_vfs) {
+ netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+ if (vf_id >= bp->pf.max_vfs) {
+ netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ bool old_setting = false;
+ u32 func_flags;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ if (vf->flags & BNXT_VF_SPOOFCHK)
+ old_setting = true;
+ if (old_setting == setting)
+ return 0;
+
+ func_flags = vf->func_flags;
+ if (setting)
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ else
+ func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ /* TODO: if the driver supports VLAN filtering on the guest VLAN,
+ * the spoof check should also include VLAN anti-spoofing
+ */
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(func_flags);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->func_flags = func_flags;
+ if (setting)
+ vf->flags |= BNXT_VF_SPOOFCHK;
+ else
+ vf->flags &= ~BNXT_VF_SPOOFCHK;
+ }
+ return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ ivi->vf = vf_id;
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+ ivi->vlan = vf->vlan;
+ ivi->qos = vf->flags & BNXT_VF_QOS;
+ ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (!(vf->flags & BNXT_VF_LINK_FORCED))
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->flags & BNXT_VF_LINK_UP)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+ return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+ /* Reject broadcast or multicast MAC addresses; a zero MAC address
+ * means the VF is allowed to use its own MAC address
+ */
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(dev, "Invalid VF ethernet address\n");
+ return -EINVAL;
+ }
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(vf->mac_addr, mac, ETH_ALEN);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u16 vlan_tag;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ /* TODO: proper handling of user priority still needs to be
+ * implemented; for now, fail the command if a non-zero priority
+ * is given
+ */
+ if (vlan_id > 4095 || qos)
+ return -EINVAL;
+
+ vf = &bp->pf.vf[vf_id];
+ vlan_tag = vlan_id;
+ if (vlan_tag == vf->vlan)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.dflt_vlan = cpu_to_le16(vlan_tag);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ vf->vlan = vlan_tag;
+ return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u32 pf_link_speed;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (max_tx_rate > pf_link_speed) {
+ netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
+ max_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+ if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+ return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(max_tx_rate);
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(min_tx_rate);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->min_tx_rate = min_tx_rate;
+ vf->max_tx_rate = max_tx_rate;
+ }
+ return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+
+ vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->flags |= BNXT_VF_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->flags |= BNXT_VF_LINK_FORCED;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+ break;
+ default:
+ netdev_err(bp->dev, "Invalid link option\n");
+ rc = -EINVAL;
+ break;
+ }
+ /* CHIMP TODO: send msg to VF to update new link state */
+
+ return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+ int i;
+ struct bnxt_vf_info *vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &bp->pf.vf[i];
+ memset(vf, 0, sizeof(*vf));
+ vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_func_vf_resc_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+ req.vf_id = cpu_to_le16(i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ kfree(bp->pf.vf_event_bmap);
+ bp->pf.vf_event_bmap = NULL;
+
+ for (i = 0; i < 4; i++) {
+ if (bp->pf.hwrm_cmd_req_addr[i]) {
+ dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ bp->pf.hwrm_cmd_req_addr[i],
+ bp->pf.hwrm_cmd_req_dma_addr[i]);
+ bp->pf.hwrm_cmd_req_addr[i] = NULL;
+ }
+ }
+
+ kfree(bp->pf.vf);
+ bp->pf.vf = NULL;
+}
+
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ u32 nr_pages, size, i, j, k = 0;
+
+ bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+ if (!bp->pf.vf)
+ return -ENOMEM;
+
+ bnxt_set_vf_attr(bp, num_vfs);
+
+ size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+ nr_pages = size / BNXT_PAGE_SIZE;
+ if (size & (BNXT_PAGE_SIZE - 1))
+ nr_pages++;
+
+ for (i = 0; i < nr_pages; i++) {
+ bp->pf.hwrm_cmd_req_addr[i] =
+ dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ &bp->pf.hwrm_cmd_req_dma_addr[i],
+ GFP_KERNEL);
+
+ if (!bp->pf.hwrm_cmd_req_addr[i])
+ return -ENOMEM;
+
+ for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+ struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+ vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+ j * BNXT_HWRM_REQ_MAX_SIZE;
+ vf->hwrm_cmd_req_dma_addr =
+ bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+ BNXT_HWRM_REQ_MAX_SIZE;
+ k++;
+ }
+ }
+
+ /* Max 128 VFs */
+ bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+ if (!bp->pf.vf_event_bmap)
+ return -ENOMEM;
+
+ bp->pf.hwrm_cmd_req_pages = nr_pages;
+ return 0;
+}
+
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_buf_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+ req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+ req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+ req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+ req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+ req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+ req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+ req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Called only by the PF to reserve resources for VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+ u32 rc = 0, mtu, i;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+ /* Remaining rings are distributed equally among VFs for now */
+ /* TODO: the following workaround is needed to keep the total number
+ * of vf_cp_rings from exceeding the number of HW ring groups. This WA
+ * should be removed once the new HWRM exposes the HW ring group
+ * capability in hwrm_func_qcap.
+ */
+ vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+ vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+ /* TODO: restore this logic below once the WA above is removed */
+ /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+ vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+ *num_vfs;
+ else
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+ *num_vfs;
+ vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
+
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+ FUNC_CFG_REQ_ENABLES_MRU |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
+ mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ req.mru = cpu_to_le16(mtu);
+ req.mtu = cpu_to_le16(mtu);
+
+ req.num_rsscos_ctxs = cpu_to_le16(1);
+ req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.num_l2_ctxs = cpu_to_le16(4);
+ vf_vnics = 1;
+
+ req.num_vnics = cpu_to_le16(vf_vnics);
+ /* FIXME spec currently uses 1 bit for stats ctx */
+ req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < *num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->pf.active_vfs = i + 1;
+ bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc) {
+ bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+ else
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+ }
+ return rc;
+}
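A worked pass through the split above, with illustrative numbers only:
if pf.max_tx_rings = 64, tx_nr_rings = 8 and *num_vfs = 8, then
vf_tx_rings = (64 - 8) / 8 = 7 TX rings per VF. The integer division
funds every VF identically; any remainder (at most *num_vfs - 1 rings)
is simply left unassigned.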
+
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+ int rc = 0, vfs_supported;
+ int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+ /* Check if we can enable the requested number of VFs. At a minimum
+ * we require 1 RX and 1 TX ring for each VF. In this minimum
+ * configuration, features like TPA will not be available.
+ */
+ vfs_supported = *num_vfs;
+
+ while (vfs_supported) {
+ min_rx_rings = vfs_supported;
+ min_tx_rings = vfs_supported;
+ min_rss_ctxs = vfs_supported;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ min_rx_rings)
+ rx_ok = 1;
+ } else {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ min_rx_rings)
+ rx_ok = 1;
+ }
+
+ if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+ tx_ok = 1;
+
+ if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ rss_ok = 1;
+
+ if (tx_ok && rx_ok && rss_ok)
+ break;
+
+ vfs_supported--;
+ }
+
+ if (!vfs_supported) {
+ netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
+ return -EINVAL;
+ }
+
+ if (vfs_supported != *num_vfs) {
+ netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+ *num_vfs, vfs_supported);
+ *num_vfs = vfs_supported;
+ }
+
+ rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+ if (rc)
+ goto err_out1;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+ if (rc)
+ goto err_out2;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto err_out2;
+
+ rc = pci_enable_sriov(bp->pdev, *num_vfs);
+ if (rc)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+ /* Free the resources reserved for the VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+err_out1:
+ bnxt_free_vf_resources(bp);
+
+ return rc;
+}
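To make the capacity search concrete, assume BNXT_FLAG_AGG_RINGS is set
with max_rx_rings = 32, rx_nr_rings = 8, max_tx_rings = 32,
tx_nr_rings = 8, ample RSS contexts and a request for 24 VFs: the RX
spare is 32 - 8 * 2 = 16 while the TX spare is 32 - 8 = 24, so
vfs_supported counts down from 24 until the RX check passes at 16 and
the request is trimmed to 16 VFs.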
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!bp->pf.active_vfs)
+ return;
+
+ pci_disable_sriov(bp->pdev);
+
+ /* Free the resources reserved for the VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+ bnxt_free_vf_resources(bp);
+
+ bp->pf.active_vfs = 0;
+ bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+ bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
+ return 0;
+ }
+
+ rtnl_lock();
+ if (!netif_running(dev)) {
+ netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ rtnl_unlock();
+ return 0;
+ }
+ bp->sriov_cfg = true;
+ rtnl_unlock();
+ if (!num_vfs) {
+ bnxt_sriov_disable(bp);
+ bp->sriov_cfg = false;
+ wake_up(&bp->sriov_cfg_wait);
+ return 0;
+ }
+
+ /* Check if the requested number of VFs is already enabled; clear
+ * sriov_cfg before bailing out so that waiters are not left stuck.
+ */
+ if (num_vfs == bp->pf.active_vfs) {
+ bp->sriov_cfg = false;
+ wake_up(&bp->sriov_cfg_wait);
+ return 0;
+ }
+
+ bnxt_sriov_enable(bp, &num_vfs);
+
+ bp->sriov_cfg = false;
+ wake_up(&bp->sriov_cfg_wait);
+
+ return num_vfs;
+}
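bnxt_sriov_configure() is the standard PCI sriov_configure callback,
invoked when userspace writes /sys/bus/pci/devices/<bdf>/sriov_numvfs.
A sketch of the expected hookup; the probe/remove names and ID table
are assumptions, not shown in this patch:

static struct pci_driver bnxt_pci_driver = {
	.name		 = "bnxt_en",		/* assumed driver name */
	.id_table	 = bnxt_pci_tbl,	/* assumed PCI ID table */
	.probe		 = bnxt_init_one,	/* assumed probe hook */
	.remove		 = bnxt_remove_one,	/* assumed remove hook */
	.sriov_configure = bnxt_sriov_configure,
};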
+
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ void *encap_resp, __le64 encap_resp_addr,
+ __le16 encap_resp_cpr, u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_len = cpu_to_le16(msg_size);
+ req.encap_resp_addr = encap_resp_addr;
+ req.encap_resp_cmpl_ring = encap_resp_cpr;
+ memcpy(req.encap_resp, encap_resp, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+ goto fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+ goto fwd_err_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_err_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
+ goto exec_fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+exec_fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+ struct hwrm_cfa_l2_filter_alloc_input *req =
+ (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+ if (!is_valid_ether_addr(vf->mac_addr) ||
+ ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ else
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+
+ if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+ /* real link */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+ } else {
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+ phy_qcfg_req =
+ (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+ sizeof(phy_qcfg_resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+ if (vf->flags & BNXT_VF_LINK_UP) {
+ /* if physical link is down, force link up on VF */
+ if (phy_qcfg_resp.link ==
+ PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+ phy_qcfg_resp.link =
+ PORT_PHY_QCFG_RESP_LINK_LINK;
+ if (phy_qcfg_resp.auto_link_speed)
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.auto_link_speed;
+ else
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.duplex =
+ PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+ phy_qcfg_resp.pause =
+ (PORT_PHY_QCFG_RESP_PAUSE_TX |
+ PORT_PHY_QCFG_RESP_PAUSE_RX);
+ }
+ } else {
+ /* force link down */
+ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+ phy_qcfg_resp.link_speed = 0;
+ phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+ phy_qcfg_resp.pause = 0;
+ }
+ rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+ phy_qcfg_req->resp_addr,
+ phy_qcfg_req->cmpl_ring,
+ sizeof(phy_qcfg_resp));
+ }
+ return rc;
+}
+
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+ struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+ u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+ switch (req_type) {
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ rc = bnxt_vf_validate_set_mac(bp, vf);
+ break;
+ case HWRM_FUNC_CFG:
+ /* TODO Validate if VF is allowed to change mac address,
+ * mtu, num of rings etc
+ */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_func_cfg_input));
+ break;
+ case HWRM_PORT_PHY_QCFG:
+ rc = bnxt_vf_set_link(bp, vf);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+ u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+ /* Scan through the VFs and process pending commands */
+ while (1) {
+ vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+ if (vf_id >= active_vfs)
+ break;
+
+ clear_bit(vf_id, bp->pf.vf_event_bmap);
+ bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+ i = vf_id + 1;
+ }
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ goto update_vf_mac_exit;
+
+ if (!is_valid_ether_addr(resp->perm_mac_address))
+ goto update_vf_mac_exit;
+
+ if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+ goto update_vf_mac_exit;
+
+ memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+ netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644
index 0000000..c151280
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+#endif
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 9b35d14..8fb84e6 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -3,7 +3,7 @@
#
config NET_VENDOR_CAVIUM
- tristate "Cavium ethernet drivers"
+ bool "Cavium ethernet drivers"
depends on PCI
default y
---help---
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 02e4e02..a077f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -35,79 +35,79 @@
}
static const char stats_strings[][ETH_GSTRING_LEN] = {
- "TxOctetsOK ",
- "TxFramesOK ",
- "TxBroadcastFrames ",
- "TxMulticastFrames ",
- "TxUnicastFrames ",
- "TxErrorFrames ",
+ "tx_octets_ok ",
+ "tx_frames_ok ",
+ "tx_broadcast_frames ",
+ "tx_multicast_frames ",
+ "tx_unicast_frames ",
+ "tx_error_frames ",
- "TxFrames64 ",
- "TxFrames65To127 ",
- "TxFrames128To255 ",
- "TxFrames256To511 ",
- "TxFrames512To1023 ",
- "TxFrames1024To1518 ",
- "TxFrames1519ToMax ",
+ "tx_frames_64 ",
+ "tx_frames_65_to_127 ",
+ "tx_frames_128_to_255 ",
+ "tx_frames_256_to_511 ",
+ "tx_frames_512_to_1023 ",
+ "tx_frames_1024_to_1518 ",
+ "tx_frames_1519_to_max ",
- "TxFramesDropped ",
- "TxPauseFrames ",
- "TxPPP0Frames ",
- "TxPPP1Frames ",
- "TxPPP2Frames ",
- "TxPPP3Frames ",
- "TxPPP4Frames ",
- "TxPPP5Frames ",
- "TxPPP6Frames ",
- "TxPPP7Frames ",
+ "tx_frames_dropped ",
+ "tx_pause_frames ",
+ "tx_ppp0_frames ",
+ "tx_ppp1_frames ",
+ "tx_ppp2_frames ",
+ "tx_ppp3_frames ",
+ "tx_ppp4_frames ",
+ "tx_ppp5_frames ",
+ "tx_ppp6_frames ",
+ "tx_ppp7_frames ",
- "RxOctetsOK ",
- "RxFramesOK ",
- "RxBroadcastFrames ",
- "RxMulticastFrames ",
- "RxUnicastFrames ",
+ "rx_octets_ok ",
+ "rx_frames_ok ",
+ "rx_broadcast_frames ",
+ "rx_multicast_frames ",
+ "rx_unicast_frames ",
- "RxFramesTooLong ",
- "RxJabberErrors ",
- "RxFCSErrors ",
- "RxLengthErrors ",
- "RxSymbolErrors ",
- "RxRuntFrames ",
+ "rx_frames_too_long ",
+ "rx_jabber_errors ",
+ "rx_fcs_errors ",
+ "rx_length_errors ",
+ "rx_symbol_errors ",
+ "rx_runt_frames ",
- "RxFrames64 ",
- "RxFrames65To127 ",
- "RxFrames128To255 ",
- "RxFrames256To511 ",
- "RxFrames512To1023 ",
- "RxFrames1024To1518 ",
- "RxFrames1519ToMax ",
+ "rx_frames_64 ",
+ "rx_frames_65_to_127 ",
+ "rx_frames_128_to_255 ",
+ "rx_frames_256_to_511 ",
+ "rx_frames_512_to_1023 ",
+ "rx_frames_1024_to_1518 ",
+ "rx_frames_1519_to_max ",
- "RxPauseFrames ",
- "RxPPP0Frames ",
- "RxPPP1Frames ",
- "RxPPP2Frames ",
- "RxPPP3Frames ",
- "RxPPP4Frames ",
- "RxPPP5Frames ",
- "RxPPP6Frames ",
- "RxPPP7Frames ",
+ "rx_pause_frames ",
+ "rx_ppp0_frames ",
+ "rx_ppp1_frames ",
+ "rx_ppp2_frames ",
+ "rx_ppp3_frames ",
+ "rx_ppp4_frames ",
+ "rx_ppp5_frames ",
+ "rx_ppp6_frames ",
+ "rx_ppp7_frames ",
- "RxBG0FramesDropped ",
- "RxBG1FramesDropped ",
- "RxBG2FramesDropped ",
- "RxBG3FramesDropped ",
- "RxBG0FramesTrunc ",
- "RxBG1FramesTrunc ",
- "RxBG2FramesTrunc ",
- "RxBG3FramesTrunc ",
+ "rx_bg0_frames_dropped ",
+ "rx_bg1_frames_dropped ",
+ "rx_bg2_frames_dropped ",
+ "rx_bg3_frames_dropped ",
+ "rx_bg0_frames_trunc ",
+ "rx_bg1_frames_trunc ",
+ "rx_bg2_frames_trunc ",
+ "rx_bg3_frames_trunc ",
- "TSO ",
- "TxCsumOffload ",
- "RxCsumGood ",
- "VLANextractions ",
- "VLANinsertions ",
- "GROpackets ",
- "GROmerged ",
+ "tso ",
+ "tx_csum_offload ",
+ "rx_csum_good ",
+ "vlan_extractions ",
+ "vlan_insertions ",
+ "gro_packets ",
+ "gro_merged ",
};
static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
@@ -211,8 +211,11 @@
sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
+ info->regdump_len = get_regs_len(dev);
- if (adapter->params.fw_vers)
+ if (!adapter->params.fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -612,6 +615,8 @@
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_cfg;
u32 speed = ethtool_cmd_speed(cmd);
+ struct link_config old_lc;
+ int ret;
if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
return -EINVAL;
@@ -626,13 +631,11 @@
return -EINVAL;
}
+ old_lc = *lc;
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) ||
- (speed == 1000) ||
- (speed == 10000) ||
- (speed == 40000))
+ if (!(lc->supported & cap))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -645,10 +648,14 @@
}
lc->autoneg = cmd->autoneg;
- if (netif_running(dev))
- return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
- lc);
- return 0;
+ /* If the firmware rejects the Link Configuration request, back out
+ * the changes and report the error.
+ */
+ ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+ if (ret)
+ *lc = old_lc;
+
+ return ret;
}
static void get_pauseparam(struct net_device *dev,
@@ -847,7 +854,7 @@
{
int i, err = 0;
struct adapter *adapter = netdev2adap(dev);
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ u8 *buf = t4_alloc_mem(EEPROMSIZE);
if (!buf)
return -ENOMEM;
@@ -858,7 +865,7 @@
if (!err)
memcpy(data, buf + e->offset, e->len);
- kfree(buf);
+ t4_free_mem(buf);
return err;
}
@@ -887,7 +894,7 @@
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/* RMW possibly needed for first or last words.
*/
- buf = kmalloc(aligned_len, GFP_KERNEL);
+ buf = t4_alloc_mem(aligned_len);
if (!buf)
return -ENOMEM;
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -915,7 +922,7 @@
err = t4_seeprom_wp(adapter, true);
out:
if (buf != data)
- kfree(buf);
+ t4_free_mem(buf);
return err;
}
@@ -1011,11 +1018,15 @@
if (!p)
return 0;
- for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
- if (pi->adapter->flags & FULL_INIT_DONE)
+ /* Interface must be brought up at least once */
+ if (pi->adapter->flags & FULL_INIT_DONE) {
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
+
return cxgb4_write_rss(pi, pi->rss);
- return 0;
+ }
+
+ return -EPERM;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c29227e..2cf8185 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -83,7 +83,7 @@
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
-#define DRV_DESC "Chelsio T4/T5 Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware of the
@@ -151,6 +151,7 @@
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
+MODULE_FIRMWARE(FW6_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -4485,6 +4486,10 @@
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
+ dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
+ "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
+ allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+ s->rdmaciqs);
kfree(entries);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b2b5e5b..0cfa5d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -56,7 +56,7 @@
* Generic information about the driver.
*/
#define DRV_VERSION "2.0.0-ko"
-#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
* Module Parameters.
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7f5389c..47f0400 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -107,7 +107,7 @@
#include "gianfar.h"
-#define TX_TIMEOUT (1*HZ)
+#define TX_TIMEOUT (5*HZ)
const char gfar_driver_version[] = "2.0";
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 9d3bb83..b364529 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -439,10 +439,7 @@
static int __init hnae_init(void)
{
hnae_class = class_create(THIS_MODULE, "hnae");
- if (IS_ERR(hnae_class))
- return PTR_ERR(hnae_class);
-
- return 0;
+ return PTR_ERR_OR_ZERO(hnae_class);
}
static void __exit hnae_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index d611388..523e9b8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -64,17 +64,10 @@
switch (status) {
case HNAE_LED_ACTIVE:
mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
- return 2;
- case HNAE_LED_ON:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_ON_VALUE);
dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
- break;
- case HNAE_LED_OFF:
- dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
- CPLD_LED_DEFAULT_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
- break;
+ return 2;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_DEFAULT_VALUE);
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index e4ec52a..37491c8 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -11,6 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
@@ -20,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/spinlock_types.h>
#define MDIO_DRV_NAME "Hi-HNS_MDIO"
@@ -36,7 +38,7 @@
struct hns_mdio_device {
void *vbase; /* mdio reg base address */
- void *sys_vbase;
+ struct regmap *subctrl_vbase;
};
/* mdio reg */
@@ -155,10 +157,10 @@
u32 time_cnt;
u32 reg_value;
- mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val);
+ regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
- reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg);
+ regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
reg_value &= st_msk;
if ((!!check_st) == (!!reg_value))
break;
@@ -352,7 +354,7 @@
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
int ret;
- if (!mdio_dev->sys_vbase) {
+ if (!mdio_dev->subctrl_vbase) {
dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
return -ENODEV;
}
@@ -455,13 +457,12 @@
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mdio_dev->sys_vbase)) {
- ret = PTR_ERR(mdio_dev->sys_vbase);
- return ret;
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0));
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+ mdio_dev->subctrl_vbase = NULL;
}
-
new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
sizeof(int), GFP_KERNEL);
if (!new_bus->irq)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7a58b1f..4dd3e26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -109,8 +109,10 @@
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
-#define I40E_OEM_VER_BUILD_MASK 0xff00
+#define I40E_OEM_VER_BUILD_MASK 0xffff
#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -468,6 +470,8 @@
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
+ /* Per VSI lock to protect elements/list (MAC filter) */
+ spinlock_t mac_filter_list_lock;
struct list_head mac_filter_list;
/* VSI stats */
@@ -575,6 +579,8 @@
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+#define ITR_COUNTDOWN_START 100
+ u8 itr_countdown; /* when 0 should adjust ITR */
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -590,6 +596,15 @@
static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
{
static char buf[32];
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+ & I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
snprintf(buf, sizeof(buf),
"%x.%02x 0x%x %d.%d.%d",
@@ -597,9 +612,7 @@
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack, (hw->nvm.oem_ver >> 24),
- (hw->nvm.oem_ver & I40E_OEM_VER_BUILD_MASK) >> 8,
- hw->nvm.oem_ver & I40E_OEM_VER_PATCH_MASK);
+ hw->nvm.eetrack, ver, build, patch);
return buf;
}
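A worked decomposition of the shifts and masks above, using an
illustrative oem_ver value:

/* If hw->nvm.oem_ver == 0x060B0002:
 *   ver   = 0x060B0002 >> 24           = 0x06   -> 6
 *   build = (0x060B0002 >> 8) & 0xffff = 0x0b00 -> 2816
 *   patch = 0x060B0002 & 0xff          = 0x02   -> 2
 * so the version string ends in "6.2816.2".
 */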
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index daa6b17..2d74c6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -331,25 +331,11 @@
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i + 1], buf[i + 2],
- buf[i + 3], buf[i + 4], buf[i + 5],
- buf[i + 6], buf[i + 7], buf[i + 8],
- buf[i + 9], buf[i + 10], buf[i + 11],
- buf[i + 12], buf[i + 13], buf[i + 14],
- buf[i + 15]);
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
/* write whatever's left over without overrunning the buffer */
- if (i < len) {
- char d_buf[80];
- int j = 0;
-
- memset(d_buf, 0, sizeof(d_buf));
- j += sprintf(d_buf, "\t0x%04X ", i);
- while (i < len)
- j += sprintf(&d_buf[j], " %02X", buf[i++]);
- i40e_debug(hw, mask, "%s\n", d_buf);
- }
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
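The %16ph and %*ph used above are the kernel's small-buffer hex-dump
printf extension (Documentation/printk-formats.txt): a fixed length can
be embedded in the specifier, or passed at run time with the * form, up
to 64 bytes. A minimal illustration:

static void demo_hexdump(void)
{
	u8 hdr[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

	pr_debug("fixed:   %6ph\n", hdr);	/* "00 1b 21 ab cd ef" */
	pr_debug("runtime: %*ph\n", 6, hdr);	/* same bytes, runtime length */
}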
@@ -958,6 +944,9 @@
else
hw->pf_id = (u8)(func_rid & 0x7);
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -2275,13 +2264,15 @@
if (status)
return status;
- status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
- NULL);
- if (status)
- return status;
+ if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+ }
return status;
}
@@ -3824,6 +3815,28 @@
}
/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ i40e_status status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
* i40e_aq_alternate_read
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d15bd62..d4b7af9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1137,7 +1137,9 @@
goto command_write_done;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, ma, vlan, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
ret = i40e_sync_vsi_filters(vsi, true);
if (f && !ret)
dev_info(&pf->pdev->dev,
@@ -1174,7 +1176,9 @@
goto command_write_done;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, ma, vlan, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
ret = i40e_sync_vsi_filters(vsi, true);
if (!ret)
dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 767b1db..3f385ff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -661,28 +661,31 @@
/* Check autoneg */
if (autoneg == AUTONEG_ENABLE) {
- /* If autoneg is not supported, return error */
- if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
- netdev_info(netdev, "Autoneg not supported on this phy\n");
- return -EINVAL;
- }
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy\n");
+ return -EINVAL;
+ }
+ /* Autoneg is allowed to change */
config.abilities = abilities.abilities |
I40E_AQ_PHY_ENABLE_AN;
change = true;
}
} else {
- /* If autoneg is supported 10GBASE_T is the only phy that
- * can disable it, so otherwise return error
- */
- if (safe_ecmd.supported & SUPPORTED_Autoneg &&
- hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
- netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
- return -EINVAL;
- }
/* If autoneg is currently enabled */
if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+ /* If autoneg is supported 10GBASE_T is the only PHY
+ * that can disable it, so otherwise return error
+ */
+ if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+ hw->phy.link_info.phy_type !=
+ I40E_PHY_TYPE_10GBASE_T) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ return -EINVAL;
+ }
+ /* Autoneg is allowed to change */
config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN;
change = true;
@@ -748,9 +751,9 @@
status = i40e_update_link_info(hw);
if (status)
- netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1403,6 +1406,12 @@
data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+ data[i++] = veb->tc_stats.tc_tx_packets[j];
+ data[i++] = veb->tc_stats.tc_tx_bytes[j];
+ data[i++] = veb->tc_stats.tc_rx_packets[j];
+ data[i++] = veb->tc_stats.tc_rx_bytes[j];
+ }
}
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 2ec2411..fe5d9bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1516,10 +1516,12 @@
* same PCI function.
*/
netdev->dev_port = 1;
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f7ed313..b825f97 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 34
+#define DRV_VERSION_BUILD 46
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1355,6 +1355,9 @@
* @is_netdev: make sure it's a netdev filter, else it doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1413,6 +1416,9 @@
* @vlan: the vlan
* @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
**/
void i40e_del_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1519,8 +1525,10 @@
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
@@ -1531,10 +1539,12 @@
element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
false, false);
if (f)
f->is_laa = true;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
i40e_sync_vsi_filters(vsi, false);
@@ -1707,6 +1717,8 @@
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
if (!i40e_find_mac(vsi, uca->addr, false, true)) {
@@ -1754,6 +1766,7 @@
bottom_of_search_loop:
continue;
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1763,6 +1776,79 @@
}
/**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns a pointer to the newly cloned MAC filter entry, or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+ struct i40e_mac_filter *src)
+{
+ struct i40e_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+ *f = *src;
+
+ INIT_LIST_HEAD(&f->list);
+
+ return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ * @from: Pointer to the list which contains MAC filter entries; changes
+ * to those entries need to be undone.
+ *
+ * MAC filter entries from the list were slated to be removed from the device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct list_head *from)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ f->changed = true;
+ /* Move the element back into MAC filter list*/
+ list_move_tail(&f->list, &vsi->mac_filter_list);
+ }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ *
+ * MAC filter entries from the list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed && f->counter)
+ f->changed = true;
+ }
+}
+
+/**
+ * i40e_cleanup_add_list - Delete the elements from the add list and
+ * release their memory
+ * @add_list: Pointer to list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, add_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+}
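Taken together, the three helpers above give i40e_sync_vsi_filters()
below a small transaction over the filter list: entries are moved or
cloned under the spinlock, the admin-queue calls happen outside it, and
a failure path restores the list. Condensed, the flow is:

/* Sketch of the pattern the helpers support (see the function below):
 *
 *   spin_lock_bh(&vsi->mac_filter_list_lock);
 *   ...move doomed entries to tmp_del_list, clone additions to
 *      tmp_add_list...
 *   spin_unlock_bh(&vsi->mac_filter_list_lock);
 *
 *   ...issue the admin-queue commands with the lock dropped...
 *
 *   on failure:
 *      i40e_cleanup_add_list(&tmp_add_list);
 *      spin_lock_bh(&vsi->mac_filter_list_lock);
 *      i40e_undo_del_filter_entries(vsi, &tmp_del_list);
 *      i40e_undo_add_filter_entries(vsi);
 *      spin_unlock_bh(&vsi->mac_filter_list_lock);
 */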
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
* @grab_rtnl: whether RTNL needs to be grabbed
@@ -1773,11 +1859,13 @@
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct list_head tmp_del_list, tmp_add_list;
+ struct i40e_mac_filter *f, *ftmp, *fclone;
bool promisc_forced_on = false;
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
+ bool err_cond = false;
i40e_status ret = 0;
struct i40e_pf *pf;
int num_add = 0;
@@ -1798,17 +1886,13 @@
vsi->current_netdev_flags = vsi->netdev->flags;
}
+ INIT_LIST_HEAD(&tmp_del_list);
+ INIT_LIST_HEAD(&tmp_add_list);
+
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- filter_list_len = pf->hw.aq.asq_buf_size /
- sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
- if (!del_list)
- return -ENOMEM;
-
+ spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
if (!f->changed)
continue;
@@ -1816,6 +1900,58 @@
if (f->counter != 0)
continue;
f->changed = false;
+
+ /* Move the element into temporary del_list */
+ list_move_tail(&f->list, &tmp_del_list);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter == 0)
+ continue;
+ f->changed = false;
+
+ /* Clone MAC filter entry and add into temporary list */
+ fclone = i40e_mac_filter_entry_clone(f);
+ if (!fclone) {
+ err_cond = true;
+ break;
+ }
+ list_add_tail(&fclone->list, &tmp_add_list);
+ }
+
+ /* if failed to clone MAC filter entry - undo */
+ if (err_cond) {
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi);
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (err_cond)
+ i40e_cleanup_add_list(&tmp_add_list);
+ }
+
+ /* Now process 'del_list' outside the lock */
+ if (!list_empty(&tmp_del_list)) {
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ GFP_KERNEL);
+ if (!del_list) {
+ i40e_cleanup_add_list(&tmp_add_list);
+
+ /* Undo VSI's MAC filter entry element updates */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
cmd_flags = 0;
/* add to delete list */
@@ -1828,10 +1964,6 @@
del_list[num_del].flags = cmd_flags;
num_del++;
- /* unlink from filter list */
- list_del(&f->list);
- kfree(f);
-
/* flush a full buffer */
if (num_del == filter_list_len) {
ret = i40e_aq_remove_macvlan(&pf->hw,
@@ -1842,12 +1974,18 @@
memset(del_list, 0, sizeof(*del_list));
if (ret && aq_err != I40E_AQ_RC_ENOENT)
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, aq_err));
+ dev_err(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
+ /* Release memory for MAC filter entries which were
+ * synced up with HW.
+ */
+ list_del(&f->list);
+ kfree(f);
}
+
if (num_del) {
ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
del_list, num_del, NULL);
@@ -1863,6 +2001,9 @@
kfree(del_list);
del_list = NULL;
+ }
+
+ if (!list_empty(&tmp_add_list)) {
/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
@@ -1870,16 +2011,19 @@
add_list = kcalloc(filter_list_len,
sizeof(struct i40e_aqc_add_macvlan_element_data),
GFP_KERNEL);
- if (!add_list)
+ if (!add_list) {
+ /* Purge element from temporary lists */
+ i40e_cleanup_add_list(&tmp_add_list);
+
+ /* Undo add filter entries from VSI MAC filter list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_undo_add_filter_entries(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
+ }
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
+ list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
- if (f->counter == 0)
- continue;
- f->changed = false;
add_happened = true;
cmd_flags = 0;
@@ -1906,7 +2050,13 @@
break;
memset(add_list, 0, sizeof(*add_list));
}
+ /* Entries from tmp_add_list were cloned from MAC
+ * filter list, hence clean those cloned entries
+ */
+ list_del(&f->list);
+ kfree(f);
}
+
if (num_add) {
ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
add_list, num_add, NULL);
@@ -2158,6 +2308,9 @@
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
+ /* Locked once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
if (is_netdev) {
add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
is_vf, is_netdev);
@@ -2165,6 +2318,7 @@
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, vsi->netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2175,6 +2329,7 @@
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2196,6 +2351,7 @@
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
vsi->netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2204,22 +2360,28 @@
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev)) {
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr,
- 0, is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- f->macaddr);
- return -ENOMEM;
- }
+ if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev))
+ continue;
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr,
+ 0, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ return -ENOMEM;
}
}
}
+ /* Make sure to release the lock before sync_vsi_filters because
+ * that function will lock/unlock as necessary
+ */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
@@ -2244,6 +2406,9 @@
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
+ /* Locked once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
if (is_netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
@@ -2274,6 +2439,7 @@
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2282,16 +2448,22 @@
list_for_each_entry(f, &vsi->mac_filter_list, list) {
i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
+ is_vf, is_netdev);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
}
+ /* Make sure to release the lock before sync_vsi_filters because
+ * that function will lock/unlock as necessary
+ */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
@@ -2915,6 +3087,7 @@
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -3010,6 +3183,7 @@
u32 val;
/* set the ITR configuration */
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
@@ -3095,7 +3269,7 @@
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3264,6 +3438,8 @@
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* temporarily disable queue cause for NAPI processing */
u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
@@ -3276,7 +3452,7 @@
wr32(hw, I40E_QINT_TQCTL(0), qval);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
+ napi_schedule_irqoff(&q_vector->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -6661,6 +6837,15 @@
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
+ /* Add a filter to drop all Flow control frames from any VSI, preventing
+ * them from being transmitted. This stops a malicious VF from sending out
+ * PAUSE or PFC frames and potentially controlling traffic for other
+ * PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
@@ -7062,6 +7247,8 @@
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+ /* Initialize VSI lock */
+ spin_lock_init(&vsi->mac_filter_list_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
@@ -7967,6 +8154,7 @@
if (pf->hw.func_caps.vmdq) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
#ifdef I40E_FCOE
@@ -8364,7 +8552,7 @@
/**
* i40e_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
- * @netdev: This physical port's netdev
+ * @dev: This physical port's netdev
* @features: Offload features that the stack believes apply
**/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
@@ -8479,17 +8667,26 @@
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
- if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+ if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr,
I40E_VLAN_ANY, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ }
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -8545,12 +8742,22 @@
return 1;
veb = pf->veb[vsi->veb_idx];
- /* Uplink is a bridge in VEPA mode */
- if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
- return 0;
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "There is no veb associated with the bridge\n");
+ return -ENOENT;
+ }
- /* Uplink is a bridge in VEB mode */
- return 1;
+ /* Uplink is a bridge in VEPA mode */
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+ return 0;
+
+ /* Uplink is a bridge in VEB mode */
+ return 1;
}
/**
@@ -8563,10 +8770,13 @@
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
- struct i40e_mac_filter *f, *ftmp;
+ u8 laa_macaddr[ETH_ALEN];
+ bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
+ struct i40e_mac_filter *f, *ftmp;
+
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
@@ -8738,32 +8948,41 @@
vsi->id = ctxt.vsi_number;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
f->changed = true;
f_count++;
+ /* Expected to have only one MAC filter entry for LAA in list */
if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, f->macaddr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- if (ret) {
- /* some older FW has a different default */
- element.flags |=
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- }
-
- i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- f->macaddr, NULL);
+ ether_addr_copy(laa_macaddr, f->macaddr);
+ found_laa_mac_filter = true;
}
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (found_laa_mac_filter) {
+ struct i40e_aqc_remove_macvlan_element_data element;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, laa_macaddr);
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
+ if (ret) {
+ /* some older FW has a different default */
+ element.flags |=
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
+ }
+
+ i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_WOL,
+ laa_macaddr, NULL);
+ }
+
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -8826,9 +9045,12 @@
i40e_vsi_disable_irq(vsi);
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan,
f->is_vf, f->is_netdev);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
i40e_sync_vsi_filters(vsi, false);
i40e_vsi_delete(vsi);
@@ -9959,9 +10181,10 @@
static u16 pfs_found;
u16 wol_nvm_bits;
u16 link_status;
- int err = 0;
+ int err;
u32 len;
u32 i;
+ u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -10226,6 +10449,25 @@
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+
+ /* Make sure flow control is set according to current settings */
+ err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on get_phy_cap\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on set_phy_config\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on get_link_info\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
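The flow-control setup above reports failures per stage rather than with a single error code: i40e_set_fc() fills a bitmask recording whether get_phy_cap, set_phy_config, or get_link_info failed. A standalone sketch of that reporting style; the flag values are illustrative and only the stage names mirror the I40E_SET_FC_AQ_FAIL_* defines:

#include <stdio.h>

#define FC_FAIL_GET	(1u << 0)	/* get_phy_cap failed */
#define FC_FAIL_SET	(1u << 1)	/* set_phy_config failed */
#define FC_FAIL_UPDATE	(1u << 2)	/* get_link_info failed */

static void report_fc(unsigned int fail)
{
	if (fail & FC_FAIL_GET)
		printf("fc: get_phy_cap failed\n");
	if (fail & FC_FAIL_SET)
		printf("fc: set_phy_config failed\n");
	if (fail & FC_FAIL_UPDATE)
		printf("fc: get_link_info failed\n");
}

int main(void)
{
	report_fc(FC_FAIL_GET | FC_FAIL_UPDATE);	/* two stages failed */
	return 0;
}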
@@ -10383,6 +10625,15 @@
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+ * PAUSE or PFC frames and potentially controlling traffic for other
+ * PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -10430,6 +10681,7 @@
static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
i40e_status ret_code;
int i;
@@ -10437,6 +10689,10 @@
i40e_ptp_stop(pf);
+ /* Disable RSS in hw */
+ wr32(hw, I40E_PFQF_HENA(0), 0);
+ wr32(hw, I40E_PFQF_HENA(1), 0);
+
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 875a8cc..6100cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -290,9 +290,18 @@
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- if (hw->mac.type == I40E_MAC_X722)
- return i40e_read_nvm_word_aq(hw, offset, data);
- return i40e_read_nvm_word_srctl(hw, offset, data);
+ enum i40e_status_code ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ }
+ return ret_code;
}
/**
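The i40e_read_nvm_word() change gates AQ-based reads on a capability flag and brackets them with an explicit acquire/release of the hardware NVM semaphore. A compressed sketch of that discipline; all helper names here are hypothetical stand-ins:

static int read_nvm_word(struct nvm_dev *nvm, u16 offset, u16 *data)
{
	int err;

	if (!nvm->aq_access)			/* capability flag */
		return nvm_read_word_srctl(nvm, offset, data);

	err = nvm_acquire(nvm);			/* take the HW semaphore */
	if (err)
		return err;
	err = nvm_read_word_aq(nvm, offset, data);
	nvm_release(nvm);			/* release on every path */
	return err;
}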
@@ -397,9 +406,19 @@
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
- if (hw->mac.type == I40E_MAC_X722)
- return i40e_read_nvm_buffer_aq(hw, offset, words, data);
- return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ enum i40e_status_code ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+ return ret_code;
}
/**
@@ -465,7 +484,7 @@
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
@@ -545,15 +564,16 @@
**/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
u16 checksum;
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- le_sum = cpu_to_le16(checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
+ }
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 10bf2ba..bb9d583 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -322,4 +322,6 @@
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 512707c..635b3ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -815,6 +815,8 @@
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
+ * Returns true if ITR changed, false if not
+ *
* Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic
@@ -823,21 +825,32 @@
* testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
+ struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr;
int bytes_per_int;
+ int usecs;
if (rc->total_packets == 0 || !rc->itr)
- return;
+ return false;
/* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
+ * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (18000 ints/s)
+ * > 40000 Rx packets per second (8000 ints/s)
+ *
+ * The math works out because the divisor is in units of 10^(-6),
+ * which turns the bytes/usec value into MB/s. Make sure to use
+ * usecs, as the values written to the ITR registers are in 2 usec
+ * increments, and make sure to use the smoothed values that the
+ * countdown timer gives us.
*/
- bytes_per_int = rc->total_bytes / rc->itr;
+ usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+ bytes_per_int = rc->total_bytes / usecs;
+
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
@@ -850,35 +863,52 @@
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
- if (bytes_per_int <= 20)
- new_latency_range = I40E_LOW_LATENCY;
- break;
+ case I40E_ULTRA_LATENCY:
default:
if (bytes_per_int <= 20)
new_latency_range = I40E_LOW_LATENCY;
break;
}
+
+ /* this is to adjust RX more aggressively when streaming small
+ * packets. The value of 40000 was picked as it is just beyond
+ * what the hardware can receive per second if in low latency
+ * mode.
+ */
+#define RX_ULTRA_PACKET_RATE 40000
+
+ if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+ (&qv->rx == rc))
+ new_latency_range = I40E_ULTRA_LATENCY;
+
rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_100K;
+ new_itr = I40E_ITR_50K;
break;
case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K;
break;
case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_18K;
+ break;
+ case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K;
break;
default:
break;
}
- if (new_itr != rc->itr)
- rc->itr = new_itr;
-
rc->total_bytes = 0;
rc->total_packets = 0;
+
+ if (new_itr != rc->itr) {
+ rc->itr = new_itr;
+ return true;
+ }
+
+ return false;
}
/**
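To make the new divisor concrete: the ITR code is kept in 2-usec units and is only re-evaluated every ITR_COUNTDOWN_START (100) interrupts, so (itr << 1) * 100 is the sampling window in usecs, and bytes divided by usecs reads directly as MB/s. A small standalone check of that arithmetic with illustrative numbers:

#include <stdio.h>

#define ITR_COUNTDOWN_START 100

int main(void)
{
	unsigned int itr = 0x19;		/* I40E_ITR_20K: 25 * 2 = 50 usecs */
	unsigned long total_bytes = 100000;	/* bytes seen in the window */
	int usecs = (itr << 1) * ITR_COUNTDOWN_START;
	int mb_per_s = total_bytes / usecs;	/* 1 byte/usec == 1 MB/s */

	/* 50 usecs * 100 interrupts = 5000 usec window -> 20 MB/s here,
	 * right at the low/bulk boundary of the table above
	 */
	printf("window=%d usecs, rate=%d MB/s\n", usecs, mb_per_s);
	return 0;
}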
@@ -1747,6 +1777,21 @@
return total_rx_packets;
}
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about
@@ -1757,54 +1802,69 @@
struct i40e_q_vector *q_vector)
{
struct i40e_hw *hw = &vsi->back->hw;
- u16 old_itr;
+ bool rx = false, tx = false;
+ u32 rxval, txval;
int vector;
- u32 val;
vector = (q_vector->v_idx + vsi->base_vector);
+
+ /* avoid dynamic calculation if in countdown mode OR if
+ * all dynamic is disabled
+ */
+ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+ if (q_vector->itr_countdown > 0 ||
+ (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ goto enable_int;
+ }
+
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr) {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_RX_ITR <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (q_vector->rx.itr <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
- } else {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- } else {
- i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
+ rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
+
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr) {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_TX_ITR <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (q_vector->tx.itr <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
- } else {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
- vsi->base_vector - 1), val);
- } else {
- i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
+ tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+ txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
+
+ if (rx || tx) {
+ /* get the higher of the two ITR adjustments and
+ * use the same value for both ITR registers
+ * when in adaptive mode (Rx and/or Tx)
+ */
+ u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+ q_vector->tx.itr = q_vector->rx.itr = itr;
+ txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+ tx = true;
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+ rx = true;
+ }
+
+ /* only need to enable the interrupt once, but need
+ * to possibly update both ITR values
+ */
+ if (rx) {
+ /* set the INTENA_MSK_MASK so that this first write
+ * won't actually enable the interrupt, instead just
+ * updating the ITR (bit 31 on both PF and VF)
+ */
+ rxval |= BIT(31);
+ /* don't check _DOWN because interrupt isn't being enabled */
+ wr32(hw, INTREG(vector - 1), rxval);
+ }
+
+enable_int:
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, INTREG(vector - 1), txval);
+
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ else
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
}
/**
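The rewritten i40e_update_enable_itr() composes the DYN_CTLN value once via i40e_buildreg_itr() and leans on the INTENA_MSK bit: the Rx write programs an ITR without re-enabling the interrupt, and only the final Tx write enables it. A sketch of the composition; the shift values mirror my reading of the i40e register layout and should be treated as assumptions:

#include <stdint.h>

#define INTENA_BIT	(1u << 0)	/* enable the interrupt */
#define CLEARPBA_BIT	(1u << 1)	/* clear the pending-bit array */
#define ITR_INDX_SHIFT	3		/* 0 = Rx ITR, 1 = Tx ITR, 3 = none */
#define INTERVAL_SHIFT	5		/* interval field, 2-usec units */
#define INTENA_MSK_BIT	(1u << 31)	/* suppress the enable on this write */

static uint32_t buildreg_itr(unsigned int type, uint16_t itr)
{
	return INTENA_BIT | CLEARPBA_BIT |
	       (type << ITR_INDX_SHIFT) |
	       ((uint32_t)itr << INTERVAL_SHIFT);
}

/* Rx first, masked, so only the ITR value is programmed:
 *	wr32(hw, INTREG(v), buildreg_itr(0, itr) | INTENA_MSK_BIT);
 * Tx last, unmasked, which also re-enables the interrupt:
 *	wr32(hw, INTREG(v), buildreg_itr(1, itr));
 */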
@@ -2127,6 +2187,7 @@
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to u64 object
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
@@ -2186,6 +2247,7 @@
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: ptr to u64 object
*
* Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
**/
@@ -2228,6 +2290,7 @@
* @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7c0ed84..6779fb7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -32,12 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
+#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
+#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_8K
-#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_RX_DEF I40E_ITR_20K
+#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
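These defines change the default Rx/Tx ITR from 8K/4K to 20K and add the 50K and 18K steps used by the adaptive algorithm. Since the register resolution is 2 usecs, each code maps to an interrupt rate as 1e6 / (code * 2); a quick standalone check:

#include <stdio.h>

static const struct { const char *name; unsigned int code; } itr[] = {
	{ "I40E_ITR_50K", 0x000A },	/* 20 usecs  -> 50000 ints/s */
	{ "I40E_ITR_20K", 0x0019 },	/* 50 usecs  -> 20000 ints/s */
	{ "I40E_ITR_18K", 0x001B },	/* 54 usecs  -> ~18518 ints/s */
	{ "I40E_ITR_8K",  0x003E },	/* 124 usecs -> ~8064 ints/s */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(itr) / sizeof(itr[0]); i++)
		printf("%s -> %lu ints/s\n", itr[i].name,
		       1000000UL / (itr[i].code * 2));
	return 0;
}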
@@ -296,6 +298,7 @@
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
+ I40E_ULTRA_LATENCY = 3,
};
struct i40e_ring_container {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 9c4a457..dd2da35 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -544,6 +544,9 @@
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+ u64 flags;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1065,8 +1068,8 @@
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
- BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index a35a204..44462b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -547,6 +547,8 @@
*/
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
@@ -559,6 +561,7 @@
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
/* program mac filter */
@@ -941,6 +944,7 @@
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -1598,6 +1602,11 @@
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ /* Lock once, because all functions inside the for loop access the
+ * VSI's MAC filter list, which needs to be protected by the same lock.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -1616,9 +1625,11 @@
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
}
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
if (i40e_sync_vsi_filters(vsi, false))
@@ -1666,10 +1677,12 @@
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
i40e_del_filter(vsi, al->list[i].addr,
I40E_VLAN_ANY, true, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
if (i40e_sync_vsi_filters(vsi, false))
@@ -2066,6 +2079,11 @@
goto error_param;
}
+ /* Lock once because the add/del_filter functions invoked below
+ * require mac_filter_list_lock to be held
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* delete the temporary mac address */
i40e_del_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1,
@@ -2077,6 +2095,8 @@
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
if (i40e_sync_vsi_filters(vsi, false)) {
@@ -2109,6 +2129,7 @@
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
@@ -2138,7 +2159,11 @@
/* duplicate request, so just return success */
goto error_pvid;
- if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7435fb3..72b1942 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -331,25 +331,11 @@
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i + 1], buf[i + 2],
- buf[i + 3], buf[i + 4], buf[i + 5],
- buf[i + 6], buf[i + 7], buf[i + 8],
- buf[i + 9], buf[i + 10], buf[i + 11],
- buf[i + 12], buf[i + 13], buf[i + 14],
- buf[i + 15]);
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
/* write whatever's left over without overrunning the buffer */
- if (i < len) {
- char d_buf[80];
- int j = 0;
-
- memset(d_buf, 0, sizeof(d_buf));
- j += sprintf(d_buf, "\t0x%04X ", i);
- while (i < len)
- j += sprintf(&d_buf[j], " %02X", buf[i++]);
- i40e_debug(hw, mask, "%s\n", d_buf);
- }
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
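The hunk above drops two hand-rolled hex dump loops in favor of the kernel's %ph printf extension, which prints a buffer as space-separated hex bytes; the byte count can be embedded (%16ph) or passed as a field-width argument (%*ph). A minimal kernel-style sketch:

u8 mac[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

/* prints "mac: 00 1b 21 ab cd ef"; the count comes from the field
 * width, so no per-byte loop is needed
 */
pr_debug("mac: %*ph\n", (int)sizeof(mac), mac);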
@@ -443,9 +429,6 @@
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
- cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
- cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
@@ -520,8 +503,6 @@
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
- cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
- cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 8ed0edf..cbd9a1b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -101,4 +101,6 @@
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 97493a4..47e9a90 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -318,6 +318,8 @@
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
+ * Returns true if ITR changed, false if not
+ *
* Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic
@@ -326,21 +328,32 @@
* testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
+ struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr;
int bytes_per_int;
+ int usecs;
if (rc->total_packets == 0 || !rc->itr)
- return;
+ return false;
/* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
+ * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (18000 ints/s)
+ * > 40000 Rx packets per second (8000 ints/s)
+ *
+ * The math works out because the divisor is in units of 10^(-6),
+ * which turns the bytes/usec value into MB/s. Make sure to use
+ * usecs, as the values written to the ITR registers are in 2 usec
+ * increments, and make sure to use the smoothed values that the
+ * countdown timer gives us.
*/
- bytes_per_int = rc->total_bytes / rc->itr;
+ usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+ bytes_per_int = rc->total_bytes / usecs;
+
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
@@ -353,35 +366,52 @@
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
- if (bytes_per_int <= 20)
- new_latency_range = I40E_LOW_LATENCY;
- break;
+ case I40E_ULTRA_LATENCY:
default:
if (bytes_per_int <= 20)
new_latency_range = I40E_LOW_LATENCY;
break;
}
+
+ /* this is to adjust RX more aggressively when streaming small
+ * packets. The value of 40000 was picked as it is just beyond
+ * what the hardware can receive per second if in low latency
+ * mode.
+ */
+#define RX_ULTRA_PACKET_RATE 40000
+
+ if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+ (&qv->rx == rc))
+ new_latency_range = I40E_ULTRA_LATENCY;
+
rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_100K;
+ new_itr = I40E_ITR_50K;
break;
case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K;
break;
case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_18K;
+ break;
+ case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K;
break;
default:
break;
}
- if (new_itr != rc->itr)
- rc->itr = new_itr;
-
rc->total_bytes = 0;
rc->total_packets = 0;
+
+ if (new_itr != rc->itr) {
+ rc->itr = new_itr;
+ return true;
+ }
+
+ return false;
}
/*
@@ -1187,6 +1217,21 @@
return total_rx_packets;
}
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+ u32 val;
+
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_VFINT_DYN_CTLN1
+
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about
@@ -1197,55 +1242,67 @@
struct i40e_q_vector *q_vector)
{
struct i40e_hw *hw = &vsi->back->hw;
- u16 old_itr;
+ bool rx = false, tx = false;
+ u32 rxval, txval;
int vector;
- u32 val;
vector = (q_vector->v_idx + vsi->base_vector);
+
+ /* avoid dynamic calculation if in countdown mode OR if
+ * all dynamic is disabled
+ */
+ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+ if (q_vector->itr_countdown > 0 ||
+ (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ goto enable_int;
+ }
+
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr) {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_RX_ITR <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
- (q_vector->rx.itr <<
- I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
- } else {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
- } else {
- i40evf_irq_enable_queues(vsi->back, 1
- << q_vector->v_idx);
+ rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr) {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_TX_ITR <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
- (q_vector->tx.itr <<
- I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
-
- } else {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
- } else {
- i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+ tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+ txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
+ if (rx || tx) {
+ /* get the higher of the two ITR adjustments and
+ * use the same value for both ITR registers
+ * when in adaptive mode (Rx and/or Tx)
+ */
+ u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+ q_vector->tx.itr = q_vector->rx.itr = itr;
+ txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+ tx = true;
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+ rx = true;
+ }
+
+ /* only need to enable the interrupt once, but need
+ * to possibly update both ITR values
+ */
+ if (rx) {
+ /* set the INTENA_MSK_MASK so that this first write
+ * won't actually enable the interrupt, instead just
+ * updating the ITR (bit 31 on both PF and VF)
+ */
+ rxval |= BIT(31);
+ /* don't check _DOWN because interrupt isn't being enabled */
+ wr32(hw, INTREG(vector - 1), rxval);
+ }
+
+enable_int:
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, INTREG(vector - 1), txval);
+
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ else
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
}
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c4f5a4e..ebc1bf7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -32,12 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
+#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
+#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_8K
-#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_RX_DEF I40E_ITR_20K
+#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -89,16 +91,16 @@
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -291,6 +293,7 @@
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
+ I40E_ULTRA_LATENCY = 3,
};
struct i40e_ring_container {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 85af3b4..301fe2b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1055,8 +1055,8 @@
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
- BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 22841c6..22fc3d4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -112,6 +112,8 @@
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
+#define ITR_COUNTDOWN_START 100
+ u8 itr_countdown; /* when 0 or 1 update ITR */
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4c4340c..d962164 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,7 +34,7 @@
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.3.21"
+#define DRV_VERSION "1.3.33"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -282,6 +282,7 @@
/**
* i40evf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @flush: whether to force a register read to flush the write
**/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
@@ -305,15 +306,14 @@
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 val;
- u32 ena_mask;
/* handle non-queue interrupts */
- val = rd32(hw, I40E_VFINT_ICR01);
- ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
- val = rd32(hw, I40E_VFINT_DYN_CTL01);
- val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
+ val = rd32(hw, I40E_VFINT_DYN_CTL01) |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
/* schedule work on the private workqueue */
@@ -334,7 +334,7 @@
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -357,6 +357,7 @@
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
}
/**
@@ -377,6 +378,7 @@
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
q_vector->ring_mask |= BIT(t_idx);
}
@@ -1607,6 +1609,7 @@
reset_task);
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_vlan_filter *vlf;
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
@@ -1730,8 +1733,8 @@
f->add = true;
}
/* re-add all VLAN filters */
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- f->add = true;
+ list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+ vlf->add = true;
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -2357,9 +2360,12 @@
err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
+ dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- return; /* do not reschedule */
+ i40evf_shutdown_adminq(hw);
+ adapter->state = __I40EVF_STARTUP;
+ schedule_delayed_work(&adapter->init_task, HZ * 5);
+ return;
}
schedule_delayed_work(&adapter->init_task, HZ);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index dda0f67..1d21745 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -152,9 +152,17 @@
u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
+ u8 trusted;
+ int xcast_mode;
unsigned int vf_api;
};
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
struct vf_macvlans {
struct list_head l;
int vf;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9f8a7fd..47395ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8407,6 +8407,7 @@
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
+ .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b1e4703..8daa95f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -102,6 +102,8 @@
#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1d17b58..fcd8b27 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -116,6 +116,12 @@
* we want to disable the querying by default.
*/
adapter->vfinfo[i].rss_query_enabled = 0;
+
+ /* Untrust all VFs */
+ adapter->vfinfo[i].trusted = false;
+
+ /* set the default xcast mode */
+ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
return 0;
@@ -1001,6 +1007,59 @@
return 0;
}
+static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ /* verify the PF is supporting the correct APIs */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
+ !adapter->vfinfo[vf].trusted) {
+ xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+ }
+
+ if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
+ goto out;
+
+ switch (xcast_mode) {
+ case IXGBEVF_XCAST_MODE_NONE:
+ disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ enable = 0;
+ break;
+ case IXGBEVF_XCAST_MODE_MULTI:
+ disable = IXGBE_VMOLR_MPE;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
+ break;
+ case IXGBEVF_XCAST_MODE_ALLMULTI:
+ disable = 0;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ adapter->vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1063,6 +1122,9 @@
case IXGBE_VF_GET_RSS_KEY:
retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_UPDATE_XCAST_MODE:
+ retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
@@ -1124,6 +1186,17 @@
IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}
+static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[vf].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, vf);
+}
+
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1416,6 +1489,28 @@
return 0;
}
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ /* nothing to do */
+ if (adapter->vfinfo[vf].trusted == setting)
+ return 0;
+
+ adapter->vfinfo[vf].trusted = setting;
+
+ /* reset VF to reconfigure features */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+
+ e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+ return 0;
+}
+
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -1430,5 +1525,6 @@
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
+ ivi->trusted = adapter->vfinfo[vf].trusted;
return 0;
}
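The new trusted flag gates what a VF may request: in ixgbe_update_vf_xcast_mode() an untrusted VF asking for more than multicast is silently clamped, and the granted mode is echoed back in the mailbox reply. The policy, reduced to a sketch with illustrative names:

enum xcast_mode { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI };

static enum xcast_mode grant_xcast(enum xcast_mode requested, bool trusted)
{
	if (requested > XCAST_MULTI && !trusted)
		return XCAST_MULTI;	/* allmulti and beyond need trust */
	return requested;
}

From the host side, trust is toggled through the new ndo_set_vf_trust hook, e.g. "ip link set dev <pf> vf 0 trust on" with an iproute2 that supports the trust attribute; the handler resets the VF so it renegotiates its features.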
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2c197e6..dad9257 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -49,6 +49,7 @@
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting);
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 04c7ec84..ec31472 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -471,6 +471,12 @@
board_X550EM_x_vf,
};
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 7570b5c..592ff23 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1894,9 +1894,17 @@
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int flags = netdev->flags;
+ int xcast_mode;
+
+ xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
+ (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
spin_lock_bh(&adapter->mbx_lock);
+ hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 82f44e0..340cdd4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -112,6 +112,8 @@
#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d1339b0..427f360 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -469,6 +469,46 @@
}
/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the multicast mode of the VF.
+ **/
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+ struct net_device *netdev, int xcast_mode)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+ s32 err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = mbx->ops.write_posted(hw, msgbuf, 2);
+ if (err)
+ return err;
+
+ err = mbx->ops.read_posted(hw, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ return -EPERM;
+
+ return 0;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -727,6 +767,7 @@
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
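ixgbevf_update_xcast_mode() follows the usual VF-to-PF mailbox round trip: post a request, read the posted reply, and treat a NACK'd opcode as a permission failure. Generalized into a hedged sketch (the helper is hypothetical; the ops and flag names are the ones used above):

static int vf_mbx_request(struct ixgbe_hw *hw, u32 opcode, u32 arg)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[2] = { opcode, arg };
	s32 err;

	err = mbx->ops.write_posted(hw, msgbuf, 2);	/* send request */
	if (err)
		return err;
	err = mbx->ops.read_posted(hw, msgbuf, 2);	/* wait for reply */
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;		/* strip CTS flag */
	if (msgbuf[0] == (opcode | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;				/* PF refused */
	return 0;
}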
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index d40f036..ef9f773 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -63,6 +63,7 @@
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 603d29d..6bf7259 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -759,11 +759,23 @@
desc->l4i_chk = 0;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(dev->dev.parent, data,
- length, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
- WARN(1, "dma_map_single failed!\n");
- return -ENOMEM;
+
+ if (length <= 8 && (uintptr_t)data & 0x7) {
+ /* Copy unaligned small data fragment to TSO header data area */
+ memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+ data, length);
+ desc->buf_ptr = txq->tso_hdrs_dma
+ + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ } else {
+ /* Alignment is okay, map buffer and hand off to hardware */
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
}
cmd_sts = BUFFER_OWNED_BY_DMA;
@@ -779,7 +791,8 @@
}
static inline void
-txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+ u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@
int ret;
u32 cmd_csum = 0;
u16 l4i_chk = 0;
+ u32 cmd_sts;
tx_index = txq->tx_curr_desc;
desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@
desc->byte_cnt = hdr_len;
desc->buf_ptr = txq->tso_hdrs_dma +
txq->tx_curr_desc * TSO_HEADER_SIZE;
- desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
GEN_CRC;
+ /* Defer updating the first command descriptor until all
+ * following descriptors have been written.
+ */
+ if (first_desc)
+ *first_cmd_sts = cmd_sts;
+ else
+ desc->cmd_sts = cmd_sts;
+
txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -819,6 +841,8 @@
int desc_count = 0;
struct tso_t tso;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct tx_desc *first_tx_desc;
+ u32 first_cmd_sts = 0;
/* Count needed descriptors */
if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
@@ -826,11 +850,14 @@
return -EBUSY;
}
+ first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
/* Initialize the TSO handler, and prepare the first payload */
tso_start(skb, &tso);
total_len = skb->len - hdr_len;
while (total_len > 0) {
+ bool first_desc = (desc_count == 0);
char *hdr;
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
@@ -840,7 +867,8 @@
/* prepare packet headers: MAC + IP + TCP */
hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- txq_put_hdr_tso(skb, txq, data_left);
+ txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+ first_desc);
while (data_left > 0) {
int size;
@@ -860,6 +888,10 @@
__skb_queue_tail(&txq->tx_skb, skb);
skb_tx_timestamp(skb);
+ /* ensure all other descriptors are written before first cmd_sts */
+ wmb();
+ first_tx_desc->cmd_sts = first_cmd_sts;
+
/* clear TX_END status */
mp->work_tx_end &= ~(1 << txq->index);
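The TSO change above fixes a publish-ordering race: the first descriptor's cmd_sts used to hand the chain to the NIC before the remaining descriptors were written. The pattern, reduced to its core (kernel context; the struct is illustrative):

struct tx_desc_sketch { u32 cmd_sts; u32 byte_cnt; u64 buf_ptr; };

static void publish_chain(struct tx_desc_sketch *first, u32 first_cmd_sts)
{
	/* all later descriptors already carry BUFFER_OWNED_BY_DMA */
	wmb();				/* order body writes before handoff */
	first->cmd_sts = first_cmd_sts;	/* NIC may start from here now */
}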
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index dd6fe94..a47496a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -101,6 +101,8 @@
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
@@ -192,7 +194,7 @@
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
-#define MVNETA_MIB_COUNTERS_BASE 0x3080
+#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
@@ -278,6 +280,50 @@
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+struct mvneta_statistic {
+ unsigned short offset;
+ unsigned short type;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32 32
+#define T_REG_64 64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+ { 0x3000, T_REG_64, "good_octets_received", },
+ { 0x3010, T_REG_32, "good_frames_received", },
+ { 0x3008, T_REG_32, "bad_octets_received", },
+ { 0x3014, T_REG_32, "bad_frames_received", },
+ { 0x3018, T_REG_32, "broadcast_frames_received", },
+ { 0x301c, T_REG_32, "multicast_frames_received", },
+ { 0x3050, T_REG_32, "unrec_mac_control_received", },
+ { 0x3058, T_REG_32, "good_fc_received", },
+ { 0x305c, T_REG_32, "bad_fc_received", },
+ { 0x3060, T_REG_32, "undersize_received", },
+ { 0x3064, T_REG_32, "fragments_received", },
+ { 0x3068, T_REG_32, "oversize_received", },
+ { 0x306c, T_REG_32, "jabber_received", },
+ { 0x3070, T_REG_32, "mac_receive_error", },
+ { 0x3074, T_REG_32, "bad_crc_event", },
+ { 0x3078, T_REG_32, "collision", },
+ { 0x307c, T_REG_32, "late_collision", },
+ { 0x2484, T_REG_32, "rx_discard", },
+ { 0x2488, T_REG_32, "rx_overrun", },
+ { 0x3020, T_REG_32, "frames_64_octets", },
+ { 0x3024, T_REG_32, "frames_65_to_127_octets", },
+ { 0x3028, T_REG_32, "frames_128_to_255_octets", },
+ { 0x302c, T_REG_32, "frames_256_to_511_octets", },
+ { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+ { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+ { 0x3038, T_REG_64, "good_octets_sent", },
+ { 0x3040, T_REG_32, "good_frames_sent", },
+ { 0x3044, T_REG_32, "excessive_collision", },
+ { 0x3048, T_REG_32, "multicast_frames_sent", },
+ { 0x304c, T_REG_32, "broadcast_frames_sent", },
+ { 0x3054, T_REG_32, "fc_sent", },
+ { 0x300c, T_REG_32, "internal_mac_transmit_err", },
+};
+
struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
@@ -324,6 +370,8 @@
unsigned int speed;
unsigned int tx_csum_limit;
int use_inband_status:1;
+
+ u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -530,6 +578,8 @@
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -758,7 +808,6 @@
u32 q_map;
/* Enable all initialized TXs. */
- mvneta_mib_counters_clear(pp);
q_map = 0;
for (queue = 0; queue < txq_number; queue++) {
struct mvneta_tx_queue *txq = &pp->txqs[queue];
@@ -1035,6 +1084,8 @@
mvreg_write(pp, MVNETA_INTR_ENABLE,
(MVNETA_RXQ_INTR_ENABLE_ALL_MASK
| MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+ mvneta_mib_counters_clear(pp);
}
/* Set max sizes for tx queues */
@@ -2982,6 +3033,65 @@
return 0;
}
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ }
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+ const struct mvneta_statistic *s;
+ void __iomem *base = pp->base;
+ u32 high, low;
+ u64 val;
+ int i;
+
+ for (i = 0, s = mvneta_statistics;
+ s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+ s++, i++) {
+ val = 0;
+
+ switch (s->type) {
+ case T_REG_32:
+ val = readl_relaxed(base + s->offset);
+ break;
+ case T_REG_64:
+ /* Docs say to read low 32-bit then high */
+ low = readl_relaxed(base + s->offset);
+ high = readl_relaxed(base + s->offset + 4);
+ val = (u64)high << 32 | low;
+ break;
+ }
+
+ pp->ethtool_stats[i] += val;
+ }
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int i;
+
+ mvneta_ethtool_update_stats(pp);
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mvneta_statistics);
+ return -EOPNOTSUPP;
+}
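/* These three callbacks are consumed together by the ethtool core:
 * get_sset_count() sizes the request, get_strings() labels each slot and
 * get_ethtool_stats() fills it.  Accumulating into pp->ethtool_stats[]
 * rather than reporting raw register values suggests the MIB counters are
 * clear-on-read (compare the dummy reads in mvneta_mib_counters_clear()).
 * From userspace the counters then appear as:
 *
 *	$ ethtool -S eth0
 *	NIC statistics:
 *	     good_octets_received: ...
 */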
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -3003,6 +3113,9 @@
.get_drvinfo = mvneta_ethtool_get_drvinfo,
.get_ringparam = mvneta_ethtool_get_ringparam,
.set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_strings = mvneta_ethtool_get_strings,
+ .get_ethtool_stats = mvneta_ethtool_get_stats,
+ .get_sset_count = mvneta_ethtool_get_sset_count,
};
/* Initialize hw */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index bd80ac7..97f0d93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -287,7 +287,7 @@
mlxsw_emad_op_tlv_status_set(op_tlv, 0);
mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
- if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+ if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
mlxsw_emad_op_tlv_method_set(op_tlv,
MLXSW_EMAD_OP_TLV_METHOD_QUERY);
else
@@ -362,7 +362,7 @@
char *op_tlv;
op_tlv = mlxsw_emad_op_tlv(skb);
- return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+ return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
#define MLXSW_EMAD_TIMEOUT_MS 200
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 371ea3f..de69e71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1662,8 +1662,9 @@
CIR_OUT_PARAM_LO));
memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
}
- } else if (!err && out_mbox)
+ } else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+ }
mutex_unlock(&mlxsw_pci->cmd.lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 4fcba46..236fb5d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -348,8 +348,9 @@
MLXSW_REG_SFD_REC_LEN, 0x0C, false);
/* reg_sfd_uc_sub_port
- * LAG sub port.
- * Must be 0 if multichannel VEPA is not enabled.
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
* Access: RW
*/
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
@@ -396,10 +397,9 @@
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
-static inline void
-mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
- char *mac, u16 *p_vid,
- u8 *p_local_port)
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
{
mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
*p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
@@ -474,7 +474,7 @@
MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
MLXSW_REG_SFN_REC_LEN, 0x02);
-/* reg_sfd_mac_sub_port
+/* reg_sfn_mac_sub_port
* VEPA channel on the local port.
* 0 if multichannel VEPA is not enabled.
* Access: RO
@@ -482,14 +482,14 @@
MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
MLXSW_REG_SFN_REC_LEN, 0x08, false);
-/* reg_sfd_mac_fid
+/* reg_sfn_mac_fid
* Filtering identifier.
* Access: RO
*/
MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
MLXSW_REG_SFN_REC_LEN, 0x08, false);
-/* reg_sfd_mac_system_port
+/* reg_sfn_mac_system_port
* Unique port identifier for the final destination of the packet.
* Access: RO
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6e9906d..3be4a23 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1227,6 +1227,7 @@
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->learning = 1;
mlxsw_sp_port->learning_sync = 1;
+ mlxsw_sp_port->uc_flood = 1;
mlxsw_sp_port->pvid = 1;
mlxsw_sp_port->pcpu_stats =
@@ -1899,12 +1900,12 @@
if (err)
netdev_err(dev, "Failed to join bridge\n");
mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
- mlxsw_sp_port->bridged = true;
+ mlxsw_sp_port->bridged = 1;
} else {
err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
if (err)
netdev_err(dev, "Failed to leave bridge\n");
- mlxsw_sp_port->bridged = false;
+ mlxsw_sp_port->bridged = 0;
mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index fc00749..4365c8b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -84,10 +84,11 @@
struct mlxsw_sp *mlxsw_sp;
u8 local_port;
u8 stp_state;
- u8 learning:1;
- u8 learning_sync:1;
+ u8 learning:1,
+ learning_sync:1,
+ uc_flood:1,
+ bridged:1;
u16 pvid;
- bool bridged;
/* 802.1Q bridge VLANs */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/* VLAN interfaces */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c39b7a1..617fb22 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -66,7 +66,8 @@
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags =
(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
- (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0);
+ (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+ (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
break;
default:
return -EOPNOTSUPP;
@@ -123,15 +124,89 @@
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end, bool set,
+ bool only_uc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 range = fid_end - fid_begin + 1;
+ char *sftr_pl;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ if (err)
+ goto buffer_out;
+
+ /* Flooding control allows one to decide whether a given port will
+ * flood unicast traffic for which there is no FDB entry.
+ */
+ if (only_uc)
+ goto buffer_out;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+ kfree(sftr_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, last_visited_vid;
+ int err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+ true);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_flood_set;
+ }
+ }
+
+ return 0;
+
+err_port_flood_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+ netdev_err(dev, "Failed to configure unicast flooding\n");
+ return err;
+}
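/* The error path above is the usual all-or-nothing unwind: on the first
 * failure the loop records how far it got (last_visited_vid) and replays
 * the inverse operation (!set) over only the VIDs already visited, so the
 * port is left in its original flooding state.  A sketch of the shape,
 * with a hypothetical helper:
 *
 *	for_each_set_bit(vid, vlans, VLAN_N_VID)
 *		if (apply(vid, set))
 *			goto rollback;
 *	return 0;
 * rollback:
 *	for_each_set_bit(vid, vlans, vid_that_failed)
 *		apply(vid, !set);
 */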
+
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
unsigned long brport_flags)
{
+ unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ bool set;
+ int err;
+
if (switchdev_trans_ph_prepare(trans))
return 0;
+ if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+ set = !mlxsw_sp_port->uc_flood;
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
return 0;
}
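/* Note the change-detection test above: (uc_flood ^ brport_flags) &
 * BR_FLOOD is non-zero only when the cached BR_FLOOD state differs from
 * the requested flags, so the flood-table register writes are issued only
 * on an actual transition.
 */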
@@ -150,9 +225,10 @@
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
- unsigned long ageing_jiffies)
+ unsigned long ageing_clock_t)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
if (switchdev_trans_ph_prepare(trans))
@@ -247,40 +323,6 @@
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid, bool set, bool only_uc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *sftr_pl;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid,
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
- mlxsw_sp_port->local_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
- if (err)
- goto buffer_out;
-
- /* Flooding control allows one to decide whether a given port will
- * flood unicast traffic for which there is no FDB entry.
- */
- if (only_uc)
- goto buffer_out;
-
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid,
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
- mlxsw_sp_port->local_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
-buffer_out:
- kfree(sftr_pl);
- return err;
-}
-
static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
u16 vid_end)
{
@@ -345,14 +387,13 @@
netdev_err(dev, "Failed to map FID=%d", vid);
return err;
}
+ }
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true,
- false);
- if (err) {
- netdev_err(dev, "Failed to set flooding for FID=%d",
- vid);
- return err;
- }
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, false);
+ if (err) {
+ netdev_err(dev, "Failed to configure flooding\n");
+ return err;
}
for (vid = vid_begin; vid <= vid_end;
@@ -530,15 +571,14 @@
if (init)
goto out;
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false,
- false);
- if (err) {
- netdev_err(dev, "Failed to clear flooding for FID=%d",
- vid);
- return err;
- }
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
+ if (err) {
+ netdev_err(dev, "Failed to clear flooding\n");
+ return err;
+ }
+ for (vid = vid_begin; vid <= vid_end; vid++) {
/* Remove FID mapping in case of Virtual mode */
err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
if (err) {
@@ -692,7 +732,7 @@
return err;
}
-const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
.switchdev_port_obj_add = mlxsw_sp_port_obj_add,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 50e29c4..d85960c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1147,7 +1147,7 @@
}
status = mlxsw_reg_pude_oper_status_get(pude_pl);
- if (MLXSW_PORT_OPER_STATUS_UP == status) {
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
netdev_info(mlxsw_sx_port->dev, "link up\n");
netif_carrier_on(mlxsw_sx_port->dev);
} else {
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index e1329d9..bf08ce2 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -617,10 +617,10 @@
(eidled & REVID_MASK) >> REVID_SHIFT);
/* PHY Leds: link status,
- * LEDA: Link + transmit/receive events
- * LEDB: Link State + colision events
+ * LEDA: Link State + collision events
+ * LEDB: Link State + transmit/receive events
*/
- encx24j600_update_reg(priv, EIDLED, 0xbc00, 0xbc00);
+ encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);
/* Loopback disabled */
encx24j600_write_reg(priv, MACON1, 0x9);
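/* encx24j600_update_reg(priv, reg, mask, val) is presumably a masked
 * read-modify-write: only the bits selected by the mask (here 0xff00,
 * the LED configuration field of EIDLED) take the corresponding bits of
 * val (0xcb00), leaving the low byte of the register untouched.
 */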
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f1f0108..30a6f24 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -91,4 +91,15 @@
---help---
This enables the support for NetXen's Gigabit Ethernet card.
+config QED
+ tristate "QLogic QED 25/40/100Gb core driver"
+ depends on PCI
+ ---help---
+ This enables the support for ...
+
+config QEDE
+ tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+ depends on QED
+ ---help---
+ This enables the support for ...
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index b2a283d..cee90e0 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -6,3 +6,5 @@
obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644
index 0000000..5c2fd57
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644
index 0000000..ac17d86
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -0,0 +1,496 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.4.0.0"
+
+#define MAX_HWFNS_PER_DEVICE (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+/* cau states */
+enum qed_coalescing_mode {
+ QED_COAL_MODE_DISABLE,
+ QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
+
+#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+ u32 init_val;
+ bool b_valid;
+};
+
+/* The PCI personality is not quite synonymous to protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE protocol
+ */
+enum qed_pci_personality {
+ QED_PCI_ETH,
+ QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+enum QED_RESOURCES {
+ QED_SB,
+ QED_L2_QUEUE,
+ QED_VPORT,
+ QED_RSS_ENG,
+ QED_PQ,
+ QED_RL,
+ QED_MAC,
+ QED_VLAN,
+ QED_ILT,
+ QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+ QED_PF_L2_QUE,
+ QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+ QED_PORT_MODE_DE_2X40G,
+ QED_PORT_MODE_DE_2X50G,
+ QED_PORT_MODE_DE_1X100G,
+ QED_PORT_MODE_DE_4X10G_F,
+ QED_PORT_MODE_DE_4X10G_E,
+ QED_PORT_MODE_DE_4X20G,
+ QED_PORT_MODE_DE_1X40G,
+ QED_PORT_MODE_DE_2X25G,
+ QED_PORT_MODE_DE_1X25G
+};
+
+struct qed_hw_info {
+ /* PCI personality */
+ enum qed_pci_personality personality;
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[QED_MAX_RESC];
+ u32 resc_num[QED_MAX_RESC];
+ u32 feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ u8 num_tc;
+ u8 offload_tc;
+ u8 non_offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ u32 vendor_id;
+ u32 device_id;
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+
+ struct qed_igu_info *p_igu_info;
+
+ u32 port_mode;
+ u32 hw_mode;
+};
+
+struct qed_hw_cid_data {
+ u32 cid;
+ bool b_cid_allocated;
+
+ /* Additional identifiers */
+ u16 opaque_fid;
+ u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct qed_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ struct mutex mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u8 pure_lb_pq;
+ u8 offload_pq;
+ u8 pure_ack_pq;
+ u8 vf_queues_offset;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct qed_storm_stats {
+ struct storm_stats mstats;
+ struct storm_stats pstats;
+ struct storm_stats tstats;
+ struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+ struct fw_ver_info *fw_ver_info;
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+ void *token;
+ void (*func)(void *);
+};
+
+struct qed_hwfn {
+ struct qed_dev *cdev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine */
+ u8 abs_pf_id;
+#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ /* BAR access */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct qed_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct qed_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct qed_rt_data *rt_data;
+
+ /* SPQ */
+ struct qed_spq *p_spq;
+
+ /* EQ */
+ struct qed_eq *p_eq;
+
+ /* Consolidate Q */
+ struct qed_consq *p_consq;
+
+ /* Slow-Path definitions */
+ struct tasklet_struct *sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct qed_ptt *p_main_ptt;
+ struct qed_ptt *p_dpc_ptt;
+
+ struct qed_sb_sp_info *p_sp_sb;
+ struct qed_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ struct qed_pf_params pf_params;
+
+ /* Array of sb_info of all status blocks */
+ struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
+
+ struct qed_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not*/
+ bool b_int_enabled;
+
+ struct qed_mcp_info *mcp_info;
+
+ struct qed_hw_cid_data *p_tx_cids;
+ struct qed_hw_cid_data *p_rx_cids;
+
+ struct qed_dmae_info dmae_info;
+
+ /* QM init */
+ struct qed_qm_info qm_info;
+ struct qed_storm_stats storm_stats;
+
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+
+ struct qed_simd_fp_handler simd_proto_handler[64];
+
+ struct z_stream_s *stream;
+};
+
+struct pci_params {
+ int pm_cap;
+
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned int irq;
+ u8 pf_num;
+};
+
+struct qed_int_param {
+ u32 int_mode;
+ u8 num_vectors;
+ u8 min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+ struct qed_int_param in;
+ struct qed_int_param out;
+ struct msix_entry *msix_table;
+ bool fp_initialized;
+ u8 fp_msix_base;
+ u8 fp_msix_cnt;
+};
+
+struct qed_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ u8 type;
+#define QED_DEV_TYPE_BB_A0 (0 << 0)
+#define QED_DEV_TYPE_MASK (0x3)
+#define QED_DEV_TYPE_SHIFT (0)
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 12
+
+ u16 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xf
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum mf_mode mf_mode;
+#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+ u8 ver_str[VER_SIZE];
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum qed_coalescing_mode int_coalescing_mode;
+ u8 rx_coalesce_usecs;
+ u8 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ u32 drv_type;
+
+ struct qed_eth_stats *reset_stats;
+ struct qed_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Linux specific here */
+ struct qede_dev *edev;
+ struct pci_dev *pdev;
+ int msg_enable;
+
+ struct pci_params pci_params;
+
+ struct qed_int_params int_params;
+
+ u8 protocol;
+#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+ /* Callbacks to protocol driver */
+ union {
+ struct qed_common_cb_ops *common;
+ struct qed_eth_cb_ops *eth;
+ } protocol_ops;
+ void *ops_cookie;
+
+ const struct firmware *firmware;
+};
+
+#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
+ QED_DEV_TYPE_SHIFT)
+#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
+#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
+
+#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return inline u8
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ u32 concrete_fid)
+{
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+
+ return pfid;
+}
+
+#define PURE_LB_TC 8
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
+ (cdev->regview) + \
+ (offset))
+
+#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val) \
+ writel((u32)val, (void __iomem *)((u8 __iomem *)\
+ (cdev->doorbells) + (db_addr)))
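/* A minimal usage sketch for the accessors above (the register name is
 * hypothetical, for illustration only):
 *
 *	u32 v = REG_RD(cdev, SOME_REG_OFFSET);
 *	REG_WR(cdev, SOME_REG_OFFSET, v | BIT(0));
 *
 * i.e. plain readl()/writel() against the BAR mapping in cdev->regview,
 * while DOORBELL() targets the separate doorbell BAR (cdev->doorbells).
 */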
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+ u32 input_len, u8 *input_buf,
+ u32 max_size, u8 *unzip_buf);
+
+#define QED_ETH_INTERFACE_VERSION 300
+
+#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644
index 0000000..7ccdb46
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -0,0 +1,847 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE 3
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+};
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per protocol configuration object */
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cid_start;
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_QM,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct qed_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ size_t size;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_cxt_mngr {
+ /* Per protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ /* ILT shadow table */
+ struct qed_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+};
+
+static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+{
+ u32 type, pf_cids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ pf_cids += p_mngr->conn_cfg[type].cid_count;
+
+ return pf_cids;
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_qm_iids *iids)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 start_line, u32 total_size,
+ u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called only once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
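/* real_size_in_page rounds the ILT page down to a whole number of
 * elements so that no element straddles a page boundary.  Worked example
 * with the defaults in this file: a 32 KiB page (ILT_DEFAULT_HW_P_SIZE =
 * 3, i.e. 1 << (3 + 12) bytes) and a hypothetical 320-byte context give
 * 32768 / 320 = 102 elements per page, real_size_in_page = 102 * 320 =
 * 32640, and 128 deliberately wasted bytes per page.
 */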
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size,
+ p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val,
+ p_cli->last.val, p_blk->total_size,
+ p_blk->real_size_in_page, p_blk->start_line);
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 curr_line, total, pf_cids;
+ struct qed_qm_iids qm_iids;
+
+ memset(&qm_iids, 0, sizeof(qm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ curr_line = p_mngr->pf_start_line;
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC and QM clients */
+ pf_cids = qed_cxt_cdu_iids(p_mngr);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ qed_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
+ p_hwfn->qm_info.num_pqs, 0);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, QED_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++)
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+ }
+
+ return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_dma->size, p_dma->p_virt,
+ p_dma->p_phys);
+ p_dma->p_virt = NULL;
+ }
+ kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client,
+ u32 start_line_offset)
+{
+ struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left;
+
+ if (!p_blk->total_size)
+ return 0;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = min_t(u32, sz_left,
+ p_blk->real_size_in_page);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_phys,
+ GFP_KERNEL);
+ if (!p_virt)
+ return -ENOMEM;
+ memset(p_virt, 0, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *clients = p_mngr->clients;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 size, i, j;
+ int rc;
+
+ size = qed_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+ rc = -ENOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct qed_dma_mem)));
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
+
+ return 0;
+
+ilt_shadow_fail:
+ qed_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ kfree(p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+ }
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 size;
+
+ if (cid_cnt == 0)
+ continue;
+
+ size = DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+ if (!p_mngr->acquired[type].cid_map)
+ goto cid_map_fail;
+
+ p_mngr->acquired[type].max_count = cid_cnt;
+ p_mngr->acquired[type].start_cid = start_cid;
+
+ p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_mngr->acquired[type].start_cid,
+ p_mngr->acquired[type].max_count);
+ start_cid += cid_cnt;
+ }
+
+ return 0;
+
+cid_map_fail:
+ qed_cid_map_free(p_hwfn);
+ return -ENOMEM;
+}
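/* The bitmap sizing above is BITS_TO_LONGS() spelled out by hand:
 * DIV_ROUND_UP(cid_cnt, BITS_PER_LONG) longs.  E.g. on a 64-bit host,
 * 100 CIDs need DIV_ROUND_UP(100, 64) = 2 unsigned longs, i.e. 16 bytes
 * of bitmap.
 */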
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ /* default ILT page size for all clients is 32K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate the ILT shadow table */
+ rc = qed_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = qed_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return 0;
+
+tables_alloc_fail:
+ qed_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ qed_cid_map_free(p_hwfn);
+ qed_ilt_shadow_free(p_hwfn);
+ kfree(p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+ if (cid_cnt == 0)
+ continue;
+
+ memset(p_mngr->acquired[type].cid_map, 0,
+ DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long));
+ }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_iids iids;
+
+ memset(&iids, 0, sizeof(iids));
+ qed_cxt_qm_iids(p_hwfn, &iids);
+
+ memset(&params, 0, sizeof(params));
+ params.port_id = p_hwfn->port_id;
+ params.pf_id = p_hwfn->rel_pf_id;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.is_first_pf = p_hwfn->first_on_engine;
+ params.num_pf_cids = iids.cids;
+ params.start_pq = qm_info->start_pq;
+ params.num_pf_pqs = qm_info->num_pqs;
+ params.start_vport = qm_info->num_vports;
+ params.pf_wfq = qm_info->pf_wfq;
+ params.pf_rl = qm_info->pf_rl;
+ params.pq_params = qm_info->qm_pq_params;
+ params.vport_params = qm_info->qm_vport_params;
+
+ qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ union qed_qm_pq_params pq_params;
+ u16 pq;
+
+ /* XCM pure-LB queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+ return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0;
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ /* 5 - PF */
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg,
+ ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients;
+ struct qed_cxt_mngr *p_mngr;
+ struct qed_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ qed_ilt_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+
+ /* Client's 1st val and RT array are absolute, ILT shadows'
+ * lines are relative.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt could be NULL in case of dynamic
+ * allocation
+ */
+ if (p_shdw[line].p_virt) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+ rt_offst, line, i,
+ (u64)(p_shdw[line].p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+ qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+ qed_qm_init_pf(p_hwfn);
+ qed_cm_init_pf(p_hwfn);
+ qed_dq_init_pf(p_hwfn);
+ qed_ilt_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+ p_mngr->acquired[type].max_count);
+
+ if (rel_cid >= p_mngr->acquired[type].max_count) {
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+ *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+ return 0;
+}
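/* Together with qed_cxt_release_cid() below, this is a classic bitmap ID
 * allocator: find_first_zero_bit() + __set_bit() hand out the lowest free
 * relative CID, and __clear_bit() returns it.  The non-atomic
 * __set_bit()/__clear_bit() variants are only safe as long as callers
 * serialize access to the map.
 */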
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+ u32 cid,
+ enum protocol_type *p_type)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ enum protocol_type p;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (p = 0; p < MAX_CONN_TYPES; p++) {
+ p_map = &p_mngr->acquired[p];
+
+ if (!p_map->cid_map)
+ continue;
+ if (cid >= p_map->start_cid &&
+ cid < p_map->start_cid + p_map->max_count)
+ break;
+ }
+ *p_type = p;
+
+ if (p == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+ return false;
+ }
+
+ rel_cid = cid - p_map->start_cid;
+ if (!test_bit(rel_cid, p_map->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+ return false;
+ }
+ return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_mngr->acquired[type].start_cid;
+ __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+ if (!b_acquired)
+ return -EINVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return -EINVAL;
+
+ p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+ return 0;
+}
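/* The lookup above is plain array arithmetic.  Worked example with a
 * 32 KiB ILT page and a hypothetical 512-byte context: cxts_per_p =
 * 32768 / 512 = 64, so iid 70 resolves to shadow line 70 / 64 = 1 at
 * byte offset (70 % 64) * 512 = 3072 within that page.
 */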
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644
index 0000000..c8e1f5e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+/**
+ * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info);
+
+enum qed_cxt_elem_type {
+ QED_ELEM_CXT,
+ QED_ELEM_TASK
+};
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644
index 0000000..b9b7b7e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -0,0 +1,1797 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/* API common to all protocols */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module, u8 dp_level)
+{
+ u32 i;
+
+ cdev->dp_level = dp_level;
+ cdev->dp_module = dp_module;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->cdev = cdev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ mutex_init(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ cdev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 */
+ cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ kfree(qm_info->qm_pq_params);
+ qm_info->qm_pq_params = NULL;
+ kfree(qm_info->qm_vport_params);
+ qm_info->qm_vport_params = NULL;
+ kfree(qm_info->qm_port_params);
+ qm_info->qm_port_params = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+ int i;
+
+ kfree(cdev->fw_data);
+ cdev->fw_data = NULL;
+
+ kfree(cdev->reset_stats);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ kfree(p_hwfn->p_tx_cids);
+ p_hwfn->p_tx_cids = NULL;
+ kfree(p_hwfn->p_rx_cids);
+ p_hwfn->p_rx_cids = NULL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_free(p_hwfn);
+ qed_qm_info_free(p_hwfn);
+ qed_spq_free(p_hwfn);
+ qed_eq_free(p_hwfn, p_hwfn->p_eq);
+ qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_int_free(p_hwfn);
+ qed_dmae_info_free(p_hwfn);
+ }
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_port_params *p_qm_port;
+ u8 num_vports, i, vport_id, num_ports;
+ u16 num_pqs, multi_cos_tcs = 1;
+
+ memset(qm_info, 0, sizeof(*qm_info));
+
+ num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+ /* Sanity checking that setup requires legal number of resources */
+ if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn,
+ "Need too many Physical queues - 0x%04x when only %04x are available\n",
+ num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+ return -EINVAL;
+ }
+
+ /* PQs will be arranged as follows: first the per-TC PQs, then the
+ * pure-LB queue.
+ */
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ num_pqs, GFP_ATOMIC);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ num_vports, GFP_ATOMIC);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ MAX_NUM_PORTS, GFP_ATOMIC);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ /* First init per-TC PQs */
+ for (i = 0; i < multi_cos_tcs; i++) {
+ struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ }
+
+ /* Then init pure-LB PQ */
+ qm_info->pure_lb_pq = i;
+ qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[i].wrr_group = 1;
+ i++;
+
+ qm_info->offload_pq = 0;
+ qm_info->num_pqs = num_pqs;
+ qm_info->num_vports = num_vports;
+
+ /* Initialize qm port parameters */
+ num_ports = p_hwfn->cdev->num_ports_in_engines;
+ for (i = 0; i < num_ports; i++) {
+ p_qm_port = &qm_info->qm_port_params[i];
+ p_qm_port->active = 1;
+ p_qm_port->num_active_phys_tcs = 4;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+
+ qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ qm_info->pf_wfq = 0;
+ qm_info->pf_rl = 0;
+ qm_info->vport_rl_en = 1;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ kfree(qm_info->qm_pq_params);
+ kfree(qm_info->qm_vport_params);
+ kfree(qm_info->qm_port_params);
+
+ return -ENOMEM;
+}
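+
+/* Resulting PQ layout for the default multi_cos_tcs = 1 (illustrative):
+ *
+ *   qm_pq_params[0] - per-TC PQ, tc_id = hw_info.non_offload_tc
+ *   qm_pq_params[1] - pure-LB PQ, tc_id = PURE_LB_TC; this index is
+ *                     recorded in qm_info->pure_lb_pq
+ */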
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+ struct qed_consq *p_consq;
+ struct qed_eq *p_eq;
+ int i, rc = 0;
+
+ cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+ if (!cdev->fw_data)
+ return -ENOMEM;
+
+ /* Allocate Memory for the Queue->CID mapping */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ int tx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ int rx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+ if (!p_hwfn->p_tx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Tx Cids\n");
+ goto alloc_err;
+ }
+
+ p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+ if (!p_hwfn->p_rx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Rx Cids\n");
+ goto alloc_err;
+ }
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* First allocate the context manager structure */
+ rc = qed_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = qed_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Prepare and process QM requirements */
+ rc = qed_init_qm_info(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Compute the ILT client partition */
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+ * The table sizes are determined by the computations above
+ */
+ rc = qed_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because initializes SPQ context */
+ rc = qed_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ p_eq = qed_eq_alloc(p_hwfn, 256);
+
+ if (!p_eq)
+ goto alloc_err;
+ p_hwfn->p_eq = p_eq;
+
+ p_consq = qed_consq_alloc(p_hwfn);
+ if (!p_consq)
+ goto alloc_err;
+ p_hwfn->p_consq = p_consq;
+
+ /* DMA info initialization */
+ rc = qed_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dmae_info structure\n");
+ goto alloc_err;
+ }
+ }
+
+ cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+ if (!cdev->reset_stats) {
+ DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+ goto alloc_err;
+ }
+
+ return 0;
+
+alloc_err:
+ qed_resc_free(cdev);
+ return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_setup(p_hwfn);
+ qed_spq_setup(p_hwfn);
+ qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+ qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ }
+}
+
+#define FINAL_CLEANUP_CMD_OFFSET (0)
+#define FINAL_CLEANUP_CMD (0x1)
+#define FINAL_CLEANUP_VALID_OFFSET (6)
+#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
+#define FINAL_CLEANUP_COMP (0x2)
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ int rc = -EBUSY;
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+
+ command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
+ command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
+ command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
+ command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+ /* Make sure notification is not set before initiating final cleanup */
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(
+ p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ id, command);
+
+ qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ msleep(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = 0;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
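+
+/* Worked example (illustrative): for PF id 2, the command word built above
+ * is composed of
+ *
+ *   (0x1 << 0)                          - FINAL_CLEANUP_CMD
+ *   (1 << 6)                            - valid bit
+ *   (2 << 7)                            - VF/PF id
+ *   (0x2 << SDM_OP_GEN_COMP_TYPE_SHIFT) - completion type
+ *
+ * The poll loop waits up to FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME
+ * = 100 * 10 ms = 1 s for the firmware acknowledgment.
+ */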
+
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ hw_mode = (1 << MODE_BB_A0);
+
+ switch (p_hwfn->cdev->num_ports_in_engines) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+ return;
+ }
+
+ switch (p_hwfn->cdev->mf_mode) {
+ case SF:
+ hw_mode |= 1 << MODE_SF;
+ break;
+ case MF_OVLAN:
+ hw_mode |= 1 << MODE_MF_SD;
+ break;
+ case MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
+ hw_mode |= 1 << MODE_SF;
+ }
+
+ hw_mode |= 1 << MODE_ASIC;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, sb_id;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ if (!p_block->is_pf)
+ continue;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id,
+ 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+ sb_entry);
+ }
+ }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_common_rt_init_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc = 0;
+
+ qed_init_cau_rt_data(cdev);
+
+ /* Program GTT windows */
+ qed_gtt_init(p_hwfn);
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.pf_rl_en = qm_info->pf_rl_en;
+ params.pf_wfq_en = qm_info->pf_wfq_en;
+ params.vport_rl_en = qm_info->vport_rl_en;
+ params.vport_wfq_en = qm_info->vport_wfq_en;
+ params.port_params = qm_info->qm_port_params;
+
+ qed_qm_common_rt_init(p_hwfn, &params);
+
+ qed_cxt_hw_init_common(p_hwfn);
+
+ /* Close gate from NIG to BRB/Storm; By default they are open, but
+ * we close them to prevent NIG from passing data to reset blocks.
+ * Should have been done in the ENGINE phase, but init-tool lacks
+ * proper port-pretend capabilities.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_unpretend(p_hwfn, p_ptt);
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ /* Disable relaxed ordering in the PCI config space */
+ qed_wr(p_hwfn, p_ptt, 0x20b4,
+ qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+
+ return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ int rc = 0;
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ int rc = 0;
+
+ if (p_hwfn->mcp_info) {
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update rate limit once we'll actually have a link */
+ p_hwfn->qm_info.pf_rl = 100;
+ }
+
+ qed_cxt_hw_init_pf(p_hwfn);
+
+ qed_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & MODE_MF_SI) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn,
+ NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+ }
+
+ /* Protocol Configuration */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* Clean the chip from previous driver remains, if any exist */
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ if (rc != 0)
+ return rc;
+
+ /* PF Init sequence */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ if (b_hw_start) {
+ /* enable interrupts */
+ qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+ /* send function start command */
+ rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+ }
+ return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 enable)
+{
+ u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+ /* Change PF in PXP */
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* wait until value is set - try for 1 second every 50us */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ usleep_range(50, 60);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data)
+{
+ struct qed_storm_stats *p_stat;
+ u32 load_code, param, *p_address;
+ int rc, mfw_rc, i;
+ u8 fw_vport = 0;
+
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
+ if (rc != 0)
+ return rc;
+
+ /* Enable DMAE in PXP */
+ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ qed_calc_hw_mode(p_hwfn);
+
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_code);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ return rc;
+ }
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+ rc, load_code);
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode,
+ b_hw_start, int_mode,
+ allow_npar_tx_switch);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+
+ /* ACK mfw regardless of success or failure of initialization */
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_LOAD_DONE,
+ 0, &load_code, &param);
+ if (rc)
+ return rc;
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+ return mfw_rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+
+ /* init PF stats */
+ p_stat = &p_hwfn->storm_stats;
+ p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+
+ p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+
+ p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+
+ p_address = &p_stat->tstats.address;
+ *p_address = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ }
+
+ return 0;
+}
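+
+/* Typical bring-up ordering (a sketch; the real caller lives in the qed
+ * main module, error handling is omitted and fw_data is a hypothetical
+ * pointer into the binary firmware file):
+ *
+ *	rc = qed_hw_prepare(cdev, QED_PCI_ETH);
+ *	rc = qed_resc_alloc(cdev);
+ *	qed_resc_setup(cdev);
+ *	rc = qed_hw_init(cdev, true, QED_INT_MODE_MSIX, true, fw_data);
+ *
+ * QED_PCI_ETH and QED_INT_MODE_MSIX are assumed values from the qed
+ * personality and interrupt-mode enums.
+ */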
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+int qed_hw_stop(struct qed_dev *cdev)
+{
+ int rc = 0, t_rc;
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ rc = qed_sp_pf_stop(p_hwfn);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ /* Disable Attention Generation */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt,
+ false);
+ if (t_rc != 0)
+ rc = t_rc;
+
+ return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 reg,
+ bool expected)
+{
+ u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+ if (assert_val != expected) {
+ DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ reg, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+ int rc = 0;
+ u32 unload_resp, unload_param;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+ /* Check for incorrect states */
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ TCFC_REG_STRONG_ENABLE_PF, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ CCFC_REG_STRONG_ENABLE_PF, 0);
+
+ /* Send unload command to MCP */
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_REQ,
+ DRV_MB_PARAM_UNLOAD_WOL_MCP,
+ &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+ unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_DONE,
+ 0, &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+ qed_ptt_pool_free(p_hwfn);
+ kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* clear indirect access */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+ /* Clean Previous errors if such exist */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+
+ /* enable internal target-read */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ return 0;
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ int num_features = 1;
+
+ feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+ num_features,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE));
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+ feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+ num_features);
+}
+
+static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ int num_funcs, i;
+
+ num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
+ : p_hwfn->cdev->num_ports_in_engines;
+
+ resc_num[QED_SB] = min_t(u32,
+ (MAX_SB_PER_PATH_BB / num_funcs),
+ qed_int_get_num_sbs(p_hwfn, NULL));
+ resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+ resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+ resc_num[QED_RL] = 8;
+ resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+ resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+ num_funcs;
+ resc_num[QED_ILT] = 950;
+
+ for (i = 0; i < QED_MAX_RESC; i++)
+ resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+ qed_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "The numbers for each resource are:\n"
+ "SB = %d start = %d\n"
+ "L2_QUEUE = %d start = %d\n"
+ "VPORT = %d start = %d\n"
+ "PQ = %d start = %d\n"
+ "RL = %d start = %d\n"
+ "MAC = %d start = %d\n"
+ "VLAN = %d start = %d\n"
+ "ILT = %d start = %d\n",
+ p_hwfn->hw_info.resc_num[QED_SB],
+ p_hwfn->hw_info.resc_start[QED_SB],
+ p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_num[QED_VPORT],
+ p_hwfn->hw_info.resc_start[QED_VPORT],
+ p_hwfn->hw_info.resc_num[QED_PQ],
+ p_hwfn->hw_info.resc_start[QED_PQ],
+ p_hwfn->hw_info.resc_num[QED_RL],
+ p_hwfn->hw_info.resc_start[QED_RL],
+ p_hwfn->hw_info.resc_num[QED_MAC],
+ p_hwfn->hw_info.resc_start[QED_MAC],
+ p_hwfn->hw_info.resc_num[QED_VLAN],
+ p_hwfn->hw_info.resc_start[QED_VLAN],
+ p_hwfn->hw_info.resc_num[QED_ILT],
+ p_hwfn->hw_info.resc_start[QED_ILT]);
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+ struct qed_mcp_link_params *link;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read nvm_cfg1 (notice this is just an offset, not an offsize (TBD)) */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ /* Read Vendor Id / Device Id */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, pci_id);
+ p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
+ NVM_CFG1_GLOB_VENDOR_ID_MASK;
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, core_cfg);
+
+ core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
+ offsetof(struct nvm_cfg1_func, device_id);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ if (IS_MF(p_hwfn)) {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
+ } else {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, speed_cap_mask));
+ link->speed.advertised_speeds =
+ link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+ link->speed.advertised_speeds;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+ link_temp);
+ }
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->cdev->mf_mode = MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->cdev->mf_mode = MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
+ p_hwfn->cdev->mf_mode = SF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+ p_hwfn->cdev->mf_mode);
+
+ return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ u32 port_mode;
+ int rc;
+
+ /* Read the port mode */
+ port_mode = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NW_PORT_MODE_BB_B0);
+
+ if (port_mode < 3) {
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ } else if (port_mode <= 5) {
+ p_hwfn->cdev->num_ports_in_engines = 2;
+ } else {
+ DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+
+ /* Default num_ports_in_engines to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+
+ qed_hw_get_nvm_info(p_hwfn, p_ptt);
+
+ rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (qed_mcp_is_init(p_hwfn))
+ ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac);
+ else
+ eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ enum qed_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+ qed_hw_get_resc(p_hwfn);
+
+ return rc;
+}
+
+static void qed_get_dev_info(struct qed_dev *cdev)
+{
+ u32 tmp;
+
+ cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_REV);
+ MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+ /* Learn number of HW-functions */
+ tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+ DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+ cdev->num_hwfns = 2;
+ } else {
+ cdev->num_hwfns = 1;
+ }
+
+ cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+ cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+ DP_INFO(cdev->hwfns,
+ "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ cdev->chip_num, cdev->chip_rev,
+ cdev->chip_bond_id, cdev->chip_metal);
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ void __iomem *p_regview,
+ void __iomem *p_doorbells,
+ enum qed_pci_personality personality)
+{
+ int rc = 0;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ return -EINVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ rc = qed_hw_hwfn_prepare(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ goto err0;
+ }
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id)
+ qed_get_dev_info(p_hwfn->cdev);
+
+ /* Initialize MCP structure */
+ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = qed_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ goto err2;
+ }
+
+ return rc;
+err2:
+ qed_mcp_free(p_hwfn);
+err1:
+ qed_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+static u32 qed_hw_bar_size(struct qed_dev *cdev,
+ u8 bar_id)
+{
+ u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+
+ return size / cdev->num_hwfns;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality)
+{
+ int rc, i;
+
+ /* Store the precompiled init data ptrs */
+ qed_init_iro_array(cdev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+ cdev->doorbells, personality);
+ if (rc)
+ return rc;
+
+ personality = cdev->hwfns[0].hw_info.personality;
+
+ /* Initialize the rest of the hwfns */
+ for (i = 1; i < cdev->num_hwfns; i++) {
+ void __iomem *p_regview, *p_doorbell;
+
+ p_regview = cdev->regview +
+ i * qed_hw_bar_size(cdev, 0);
+ p_doorbell = cdev->doorbells +
+ i * qed_hw_bar_size(cdev, 1);
+ rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+ p_doorbell, personality);
+ if (rc) {
+ /* Cleanup previously initialized hwfns */
+ while (--i >= 0) {
+ qed_init_free(&cdev->hwfns[i]);
+ qed_mcp_free(&cdev->hwfns[i]);
+ qed_hw_hwfn_free(&cdev->hwfns[i]);
+ }
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_init_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ }
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain)
+{
+ dma_addr_t p_pbl_phys = 0;
+ void *p_pbl_virt = NULL;
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
+ u16 page_cnt = 0;
+ size_t size;
+
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ size = page_cnt * QED_CHAIN_PAGE_SIZE;
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain mem\n");
+ goto nomem;
+ }
+
+ if (mode == QED_CHAIN_MODE_PBL) {
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys,
+ GFP_KERNEL);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
+ goto nomem;
+ }
+
+ qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use,
+ p_pbl_phys, p_pbl_virt);
+ } else {
+ qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use, mode);
+ }
+
+ return 0;
+
+nomem:
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PAGE_SIZE,
+ p_virt, p_phys);
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+ p_pbl_virt, p_pbl_phys);
+
+ return -ENOMEM;
+}
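+
+/* Usage sketch (illustrative): allocating a 256-element chain in PBL mode
+ * and releasing it on teardown; 'struct elem' is a hypothetical element
+ * type and error handling is abbreviated:
+ *
+ *	struct qed_chain chain;
+ *
+ *	if (qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
+ *			    QED_CHAIN_MODE_PBL, 256,
+ *			    sizeof(struct elem), &chain))
+ *		return -ENOMEM;
+ *	...
+ *	qed_chain_free(cdev, &chain);
+ */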
+
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ size_t size;
+
+ if (!p_chain->p_virt_addr)
+ return;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->pbl.p_virt_table,
+ p_chain->pbl.p_phys_table);
+ }
+
+ size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+}
+
+static void __qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ int i, j;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct tstorm_per_port_stat tstats;
+ struct port_stats port_stats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ p_hwfn->storm_stats.mstats.address,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ p_hwfn->storm_stats.ustats.address,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ p_hwfn->storm_stats.pstats.address,
+ p_hwfn->storm_stats.pstats.len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ p_hwfn->storm_stats.tstats.address,
+ p_hwfn->storm_stats.tstats.len);
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ if (p_hwfn->mcp_info)
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+ stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+ stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+ stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+ stats->rx_64_byte_packets += port_stats.pmm.r64;
+ stats->rx_127_byte_packets += port_stats.pmm.r127;
+ stats->rx_255_byte_packets += port_stats.pmm.r255;
+ stats->rx_511_byte_packets += port_stats.pmm.r511;
+ stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ stats->rx_crc_errors += port_stats.pmm.rfcs;
+ stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ stats->rx_pause_frames += port_stats.pmm.rxpf;
+ stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ stats->rx_align_errors += port_stats.pmm.raln;
+ stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ stats->rx_oversize_packets += port_stats.pmm.rovr;
+ stats->rx_jabbers += port_stats.pmm.rjbr;
+ stats->rx_undersize_packets += port_stats.pmm.rund;
+ stats->rx_fragments += port_stats.pmm.rfrg;
+ stats->tx_64_byte_packets += port_stats.pmm.t64;
+ stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ stats->tx_pause_frames += port_stats.pmm.txpf;
+ stats->tx_pfc_frames += port_stats.pmm.txpp;
+ stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ stats->tx_total_collisions += port_stats.pmm.tncl;
+ stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+ for (j = 0; j < 8; j++) {
+ stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ __qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
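+ /* This relies on struct qed_eth_stats consisting purely of u64
+ * counters, so both structs can be walked as flat u64 arrays.
+ */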
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.mstats.address,
+ &mstats,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.ustats.address,
+ &ustats,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.pstats.address,
+ &pstats,
+ p_hwfn->storm_stats.pstats.len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ __qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ DP_NOTICE(p_hwfn,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+ return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_VPORT);
+ max = min + RESC_NUM(p_hwfn, QED_VPORT);
+ DP_NOTICE(p_hwfn,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+ return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
+ DP_NOTICE(p_hwfn,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+ return 0;
+}
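+
+/* Example (illustrative): with RESC_START(p_hwfn, QED_VPORT) == 8 and
+ * RESC_NUM(p_hwfn, QED_VPORT) == 8, qed_fw_vport() maps relative id 3 to
+ * absolute id 11 and rejects any relative id >= 8; qed_fw_l2_queue() and
+ * qed_fw_rss_eng() follow the same relative-to-absolute scheme.
+ */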
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644
index 0000000..e29a3ba
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -0,0 +1,283 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ * Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data);
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ * only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_GRC
+};
+
+/* Values of flags: if the QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE while the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to the destination address,
+ * using DMA.
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct qed_dmae_params {
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
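+
+/* Usage sketch (illustrative): zeroing a GRC region by replicating a small
+ * zeroed source block; zero_buf_phys is a hypothetical DMA address of a
+ * DMAE_MAX_RW_SIZE zeroed buffer:
+ *
+ *	qed_dmae_host2grc(p_hwfn, p_ptt, zero_buf_phys, grc_addr,
+ *			  size_in_dwords, QED_DMAE_FLAG_RW_REPL_SRC);
+ */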
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644
index 0000000..b2f8e85
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+/********************************/
+/* Add include to common target */
+/********************************/
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_RESERVED,
+ COMMON_EVENT_RESERVED2,
+ COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_RESERVED4,
+ COMMON_EVENT_RESERVED5,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_RESERVED,
+ COMMON_RAMROD_RESERVED2,
+ COMMON_RAMROD_RESERVED3,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 word5 /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
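+
+/* Illustrative sketch: the _MASK/_SHIFT pairs above describe sub-fields
+ * packed into the flags bytes. Minimal generic accessors for such fields;
+ * the HSI_FIELD_SET/HSI_FIELD_GET names are illustrative, not part of the
+ * HSI itself (the driver may ship its own helpers):
+ */
+#define HSI_FIELD_SET(var, name, val) \
+ ((var) = ((var) & ~((name ## _MASK) << (name ## _SHIFT))) | \
+ (((val) & (name ## _MASK)) << (name ## _SHIFT)))
+#define HSI_FIELD_GET(var, name) \
+ (((var) >> (name ## _SHIFT)) & (name ## _MASK))
+/* e.g. HSI_FIELD_SET(ctx->flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1) */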
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+struct eth_mstorm_per_queue_stat {
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
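+
+/* Illustrative sketch: each counter above is a regpair, i.e. a 64-bit value
+ * split into two little-endian dwords, assuming regpair is declared
+ * elsewhere as { __le32 lo; __le32 hi; }. Reading one as a host u64:
+ */
+static inline u64 hsi_regpair_to_u64(struct regpair rp)
+{
+ return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
+}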
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+ struct event_ring_next_addr next_addr;
+};
+
+enum personality_type {
+ PERSONALITY_RESERVED,
+ PERSONALITY_RESERVED2,
+ PERSONALITY_RDMA_AND_ETH /* RoCE or iWARP */,
+ PERSONALITY_RESERVED3,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_RESERVED4,
+ MAX_PERSONALITY_TYPE
+};
+
+struct pf_start_tunnel_config {
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ u32 outer_tag;
+ u8 reserved0[4];
+};
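+
+/* Illustrative sketch of filling the tunnel portion of the PF start ramrod;
+ * the helper name is hypothetical and 4789 (the IANA VXLAN port) is only an
+ * example value:
+ */
+static inline void hsi_example_tunnel_cfg(struct pf_start_ramrod_data *p)
+{
+ p->tunnel_config.set_vxlan_udp_port_flg = 1;
+ p->tunnel_config.vxlan_udp_port = cpu_to_le16(4789);
+ p->tunnel_config.tx_enable_vxlan = 1;
+}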
+
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr;
+};
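+
+/* Illustrative sketch of composing a slowpath element: the ramrod header
+ * carries the connection CID plus command/protocol IDs, while data_ptr (set
+ * up separately) points at the DMA-mapped ramrod data. The helper name is
+ * hypothetical:
+ */
+static inline void hsi_example_spqe(struct slow_path_element *el,
+ u32 cid, u8 cmd_id, u8 protocol_id)
+{
+ el->hdr.cid = cpu_to_le32(cid);
+ el->hdr.cmd_id = cmd_id;
+ el->hdr.protocol_id = protocol_id;
+ el->hdr.echo = cpu_to_le16(0) /* opaque value echoed on completion */;
+}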
+
+struct tstorm_per_port_stat {
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair fcoe_irregular_pkt;
+ struct regpair roce_irregular_pkt;
+ struct regpair eth_irregular_pkt;
+ struct regpair toe_irregular_pkt;
+ struct regpair preroce_irregular_pkt;
+};
+
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x618000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_PTU,
+ BLOCK_DMAE,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+ __le32 comp_addr_hi;
+ __le32 comp_val /* Value to write to completion address */;
+ __le32 crc32 /* crc32 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
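+
+/* Illustrative sketch of composing a dmae_cmd opcode from the fields above,
+ * reusing the HSI_FIELD_SET() sketch from earlier; the source/destination
+ * selector values here are examples only, not recommended settings:
+ */
+static inline void hsi_example_dmae_opcode(struct dmae_cmd *cmd, u8 port_id)
+{
+ u32 opcode = 0;
+
+ HSI_FIELD_SET(opcode, DMAE_CMD_SRC, 0); /* example source select */
+ HSI_FIELD_SET(opcode, DMAE_CMD_DST, 1); /* example destination select */
+ HSI_FIELD_SET(opcode, DMAE_CMD_PORT_ID, port_id);
+ cmd->opcode = cpu_to_le32(opcode);
+}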
+
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+enum init_modes {
+ MODE_BB_A0,
+ MODE_RESERVED,
+ MODE_RESERVED2,
+ MODE_ASIC,
+ MODE_RESERVED3,
+ MODE_RESERVED4,
+ MODE_RESERVED5,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_40G,
+ MODE_100G,
+ MODE_EAGLE_ENG1_WORKAROUND,
+ MAX_INIT_MODES
+};
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_RESERVED,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
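+
+/* Illustrative sketch of requesting VXLAN recognition via the
+ * per-encapsulation flags above (the actual parser register write is
+ * elided); reuses the HSI_FIELD_SET() sketch from earlier:
+ */
+static inline u8 hsi_example_prs_encap_vxlan(void)
+{
+ struct prs_reg_encapsulation_type_en en = { 0 };
+
+ HSI_FIELD_SET(en.flags, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, 1);
+ return en.flags;
+}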
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ u32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
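+
+/* Illustrative sketch of encoding one PQ map entry with the fields above;
+ * the helper name is hypothetical and the argument values are the caller's:
+ */
+static inline u32 hsi_example_pq_map(u16 vp_pq_id, u8 voq, u8 wrr_group)
+{
+ u32 map = 0;
+
+ HSI_FIELD_SET(map, QM_RF_PQ_MAP_PQ_VALID, 1);
+ HSI_FIELD_SET(map, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id);
+ HSI_FIELD_SET(map, QM_RF_PQ_MAP_VOQ, voq);
+ HSI_FIELD_SET(map, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr_group);
+ return map;
+}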
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
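+
+/* Since GRC addresses are specified in dwords, a byte address is converted
+ * by dividing by four; an illustrative bounds check:
+ */
+static inline bool hsi_grc_byte_addr_valid(u32 byte_addr)
+{
+ return (byte_addr / 4) <= MAX_GRC_ADDR; /* dword address must fit */
+}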
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN 19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS 8
+#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS 18
+#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS 6
+#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS 8
+#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM 3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS 3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS 0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT 0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
+
+/* max number of register values following the idle check header */
+#define IDLE_CHK_MAX_DUMP_REGS 2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR 0
+#define IDLE_CHK_QM_RD_WR_BANK 1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* The MCP Trace meta data signature is duplicated in the perl script that
+ * generates the NVRAM images.
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+ u32 offset;
+ u32 length /* buffer length in bytes */;
+};
+
+/* binary buffer types */
+enum bin_buffer_type {
+ BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_IRO /* internal RAM offsets array */,
+ MAX_BIN_BUFFER_TYPE
+};
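+
+/* Illustrative sketch of locating one buffer inside a firmware binary image
+ * via the header above, assuming (not guaranteed by this file) that the
+ * image starts with one bin_buffer_hdr per bin_buffer_type:
+ */
+static inline const u8 *hsi_example_find_buf(const u8 *image,
+ enum bin_buffer_type type, u32 *len)
+{
+ const struct bin_buffer_hdr *hdr =
+ &((const struct bin_buffer_hdr *)image)[type];
+
+ *len = hdr->length;
+ return image + hdr->offset;
+}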
+
+/* Chip IDs */
+enum chip_ids {
+ CHIP_BB_A0 /* BB A0 chip ID */,
+ CHIP_BB_B0 /* BB B0 chip ID */,
+ CHIP_K2 /* AH chip ID */,
+ MAX_CHIP_IDS
+};
+
+enum idle_chk_severity_types {
+ IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+struct init_array_raw_hdr {
+ __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+struct init_array_standard_hdr {
+ __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+struct init_array_zipped_hdr {
+ __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+struct init_array_pattern_hdr {
+ __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Block ID */;
+};
+
+/* init comparison types */
+enum init_comparison_types {
+ INIT_COMPARISON_EQ /* comparison: equal */,
+ INIT_COMPARISON_OR /* comparison: bitwise or */,
+ INIT_COMPARISON_AND /* comparison: bitwise and */,
+ MAX_INIT_COMPARISON_TYPES
+};
+
+/* init operation: delay */
+struct init_delay_op {
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+ __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
+};
+
+/* init operation: if_phase */
+struct init_if_phase_op {
+ __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/* init array params */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+ __le32 data;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/* init operation: read */
+struct init_read_op {
+ __le32 op_data;
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_COMP_MASK 0x7
+#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 7
+#define INIT_READ_OP_POLL_MASK 0x1
+#define INIT_READ_OP_POLL_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+ __le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+/* Init command operation types */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_IF_MODE,
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
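+
+/* Illustrative sketch: every init operation variant keeps its op type in the
+ * low four bits of the first dword, so the type can be read through the raw
+ * header before dispatching; reuses the HSI_FIELD_GET() sketch from earlier:
+ */
+static inline enum init_op_types hsi_example_init_op_type(union init_op *op)
+{
+ return (enum init_op_types)
+ HSI_FIELD_GET(le32_to_cpu(op->raw.op_data), INIT_RAW_OP_OP);
+}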
+
+/* init source types */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
+
+/* Internal RAM Offsets macro data */
+struct iro {
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
+};
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+ u8 num_active_phys_tcs;
+ u16 num_pbf_cmd_lines;
+ u16 num_btb_blocks;
+ __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+ u8 reserved;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ u32 vport_rl;
+ u16 vport_wfq;
+ u16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+ 0x00f000UL
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+ 0x010000UL
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+ 0x011000UL
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+ 0x012000UL
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+ 0x013000UL
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+ 0x014000UL
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+ 0x015000UL
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+ 0x016000UL
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+ 0x017000UL
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+ 0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
+
+struct qed_qm_common_rt_init_params {
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
+};
+
+/**
+ * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param port_params - array of size MAX_NUM_PORTS with
+ * parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl);
+
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if a timeout occurred while
+ * waiting for the QM command to complete.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
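+
+/* Illustrative usage sketch, assuming the usual stop-then-release flow: the
+ * first call stops the Tx PQs, the second releases them. The helper name is
+ * hypothetical:
+ */
+static inline bool hsi_example_qm_stop_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 start_pq, u16 num_pqs)
+{
+ if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq, num_pqs))
+ return false; /* timeout while stopping */
+ return qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
+}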
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \
+ ((port_id) * \
+ IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \
+ ((vf_id) * \
+ IRO[2].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \
+ ((pf_id) * \
+ IRO[4].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[4].size)
+/* Ustorm Completion ring consumer */
+#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \
+ ((global_queue_id) * \
+ IRO[5].m1))
+#define USTORM_CQ_CONS_SIZE (IRO[5].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \
+ ((core_rx_queue_id) * \
+ IRO[12].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size)
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
+ ((core_rx_q_id) * \
+ IRO[13].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
+ ((core_rx_q_id) * \
+ IRO[14].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
+ ((core_txst_id) * \
+ IRO[15].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
+ ((stat_counter_id) * \
+ IRO[16].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \
+ ((queue_id) * \
+ IRO[17].m1))
+#define MSTORM_PRODS_SIZE (IRO[17].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \
+ ((stat_counter_id) * \
+ IRO[19].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[19].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \
+ ((queue_id) * \
+ IRO[20].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \
+ ((stat_counter_id) * \
+ IRO[21].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \
+ ((pf_id) * \
+ IRO[22].m1))
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \
+ ((queue_id) * \
+ IRO[23].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \
+ ((rss_id) * \
+ IRO[24].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \
+ ((rss_id) * \
+ IRO[25].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \
+ ((pf_id) * \
+ IRO[26].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \
+ ((cmdq_queue_id) * \
+ IRO[27].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \
+ ((rq_queue_id) * \
+ IRO[28].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size)
+/* Pstorm RoCE statistics */
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \
+ ((stat_counter_id) * \
+ IRO[29].m1))
+#define PSTORM_ROCE_STAT_SIZE (IRO[29].size)
+/* Tstorm RoCE statistics */
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
+ ((stat_counter_id) * \
+ IRO[30].m1))
+#define TSTORM_ROCE_STAT_SIZE (IRO[30].size)
+
+static const struct iro iro_arr[31] = {
+ { 0x10, 0x0, 0x0, 0x0, 0x8 },
+ { 0x4448, 0x60, 0x0, 0x0, 0x60 },
+ { 0x498, 0x8, 0x0, 0x0, 0x4 },
+ { 0x494, 0x0, 0x0, 0x0, 0x4 },
+ { 0x10, 0x8, 0x0, 0x0, 0x2 },
+ { 0x90, 0x8, 0x0, 0x0, 0x2 },
+ { 0x4540, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x39e0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x2598, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x4350, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x52d0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x7a48, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x100, 0x8, 0x0, 0x0, 0x8 },
+ { 0x5808, 0x10, 0x0, 0x0, 0x10 },
+ { 0xb100, 0x30, 0x0, 0x0, 0x30 },
+ { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
+ { 0x54f8, 0x40, 0x0, 0x0, 0x40 },
+ { 0x200, 0x10, 0x0, 0x0, 0x8 },
+ { 0x9e70, 0x0, 0x0, 0x0, 0x4 },
+ { 0x7ca0, 0x40, 0x0, 0x0, 0x30 },
+ { 0xd00, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2790, 0x80, 0x0, 0x0, 0x38 },
+ { 0xa520, 0xf0, 0x0, 0x0, 0xf0 },
+ { 0x80, 0x8, 0x0, 0x0, 0x8 },
+ { 0xac0, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2580, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2500, 0x8, 0x0, 0x0, 0x8 },
+ { 0x440, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1800, 0x8, 0x0, 0x0, 0x2 },
+ { 0x27c8, 0x80, 0x0, 0x0, 0x10 },
+ { 0x4710, 0x10, 0x0, 0x0, 0x10 },
+};
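
The five-value initialisers above suggest an iro layout of a base offset, three index strides and an element size, with each *_OFFSET macro resolving to base plus index times the first stride. A minimal standalone sketch of that computation (the struct field names and typedefs here are assumptions for illustration, not the driver's definitions):

	#include <stdint.h>
	#include <stdio.h>

	struct iro {
		uint32_t base;	/* byte offset of element 0 in storm RAM */
		uint16_t m1;	/* stride applied to the first index */
		uint16_t m2;	/* stride for an optional second index */
		uint16_t m3;	/* stride for an optional third index */
		uint16_t size;	/* size in bytes of one element */
	};

	/* Entry 4 above: Ustorm event ring consumer, one entry per PF. */
	static const struct iro eqe_cons_iro = { 0x10, 0x8, 0x0, 0x0, 0x2 };

	/* Mirrors USTORM_EQE_CONS_OFFSET(pf_id). */
	static uint32_t ustorm_eqe_cons_offset(uint32_t pf_id)
	{
		return eqe_cons_iro.base + pf_id * eqe_cons_iro.m1;
	}

	int main(void)
	{
		printf("EQE consumer for PF 3 at 0x%x\n",
		       ustorm_eqe_cons_offset(3));
		return 0;
	}
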
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2232
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6663
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6665
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6667
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30812
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
+
+#define RUNTIME_ARRAY_SIZE 34432
+
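The *_RT_OFFSET/*_RT_SIZE pairs above index a flat runtime-init array of RUNTIME_ARRAY_SIZE 32-bit words that is staged in host memory and later programmed into the chip. A hedged sketch of that consumption pattern (rt_data and this standalone STORE_RT_REG are illustrative names, not necessarily the driver's own helpers):

	#include <stdint.h>
	#include <string.h>

	static uint32_t rt_data[RUNTIME_ARRAY_SIZE];

	#define STORE_RT_REG(offset, val)  (rt_data[(offset)] = (val))

	static void example_rt_init(void)
	{
		int i;

		memset(rt_data, 0, sizeof(rt_data));

		/* Scalar entry: a single word at a fixed index. */
		STORE_RT_REG(DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 1);

		/* Array entry: _RT_SIZE consecutive words from _RT_OFFSET. */
		for (i = 0; i < QM_REG_RLPFINCVAL_RT_SIZE; i++)
			STORE_RT_REG(QM_REG_RLPFINCVAL_RT_OFFSET + i, 0);
	}
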
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
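
Each flags byte above packs several sub-fields behind _MASK/_SHIFT pairs. A minimal sketch of the accessor idiom, assuming SET_FIELD/GET_FIELD-style helpers (these standalone forms are illustrative, not necessarily the driver's own):

	#include <stdint.h>

	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)
	#define SET_FIELD(value, name, flag)					\
		do {								\
			(value) &= ~(name##_MASK << name##_SHIFT);		\
			(value) |= ((flag) & name##_MASK) << name##_SHIFT;	\
		} while (0)

	static void example_flags6(uint8_t *flags6)
	{
		/* Write the 2-bit "go to BD consumer" completion-flow field. */
		SET_FIELD(*flags6, XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF, 2);

		/* Read the DQ completion-flow field back from the same byte. */
		(void)GET_FIELD(*flags6, XSTORM_ETH_CONN_AG_CTX_DQ_CF);
	}
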
+
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2] /* padding */;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2] /* padding */;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+};
+
+enum eth_filter_action {
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+ ETH_FILTER_ACTION_REPLACE,
+ MAX_ETH_FILTER_ACTION
+};
+
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+struct eth_filter_cmd_header {
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+enum eth_filter_type {
+ ETH_FILTER_TYPE_MAC,
+ ETH_FILTER_TYPE_VLAN,
+ ETH_FILTER_TYPE_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC,
+ ETH_FILTER_TYPE_INNER_VLAN,
+ ETH_FILTER_TYPE_INNER_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_VNI,
+ MAX_ETH_FILTER_TYPE
+};
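
Together, eth_filter_cmd_header and eth_filter_cmd describe a batch of classification updates: the header says how many commands follow and whether they apply on RX, TX or both, and each command carries the action, type and match value. A hedged sketch of filling a unicast-MAC "add" against the structures above (the MSB-first split of the MAC into three 16-bit words is an assumption about the mac_msb/mac_mid/mac_lsb layout; cpu_to_le16 is the usual kernel byte-order helper):

	static void example_fill_mac_add(struct eth_filter_cmd *cmd,
					 const uint8_t mac[6], uint8_t vport)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->type = ETH_FILTER_TYPE_MAC;
		cmd->vport_id = vport;
		cmd->action = ETH_FILTER_ACTION_ADD;
		cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
		cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
		cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
	}
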
+
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+ ETH_RAMROD_RESERVED,
+ ETH_RAMROD_RESERVED2,
+ ETH_RAMROD_RESERVED3,
+ ETH_RAMROD_RESERVED4,
+ ETH_RAMROD_RESERVED5,
+ ETH_RAMROD_RESERVED6,
+ ETH_RAMROD_RESERVED7,
+ ETH_RAMROD_RESERVED8,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+struct eth_vport_rss_config {
+ __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED,
+ ETH_VPORT_RSS_MODE_REGULAR,
+ MAX_ETH_VPORT_RSS_MODE
+};
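
The RSS capabilities word uses the same _MASK/_SHIFT packing as the connection-context flags. A brief sketch of enabling 4-tuple hashing for TCP over IPv4, reusing the SET_FIELD helper assumed earlier:

	static void example_rss_tcp4(struct eth_vport_rss_config *rss)
	{
		uint16_t caps = 0;

		SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 1);
		SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 1);
		rss->capabilities = cpu_to_le16(caps);
		rss->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
		rss->update_rss_capabilities = 1;
	}
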
+
+struct eth_vport_rx_mode {
+ __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+ __le16 reserved2[3];
+};
+
+struct eth_vport_tpa_param {
+ u64 reserved[2];
+};
+
+struct eth_vport_tx_mode {
+ __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+ __le16 reserved2[3];
+};
+
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[4];
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair sge_base;
+};
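
The regpair fields above (cqe_pbl_addr, bd_base, sge_base) carry 64-bit DMA addresses split into two halves. A standalone sketch of that convention, assuming the usual {lo, hi} little-endian layout of struct regpair (the driver has its own helper for this; the function below is illustrative):

	static void example_set_regpair(struct regpair *rp, uint64_t dma_addr)
	{
		rp->lo = cpu_to_le32((uint32_t)dma_addr);
		rp->hi = cpu_to_le32((uint32_t)(dma_addr >> 32));
	}
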
+
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
+};
+
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 init_sge_ring_flg;
+ u8 vport_id;
+ u8 pxp_tph_valid_sge;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[6];
+ struct regpair sge_base;
+};
+
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 tc;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ u8 pxp_st_hint;
+ u8 reserved1[3];
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ struct regpair pbl_base_addr;
+};
+
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+struct vport_filter_update_ramrod_data {
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ __le16 default_vlan;
+ u8 untagged;
+ u8 reserved[7];
+};
+
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_sge_param_flg;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+ u8 default_vlan_en;
+ u8 update_default_vlan_flg;
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 reserved;
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+struct vport_update_ramrod_data {
+ struct vport_update_ramrod_data_cmn common;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
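
vport_update_ramrod_data_cmn follows an update-flag idiom: every mutable setting travels with a matching update_*_flg byte, so firmware presumably applies only the fields that are explicitly flagged. A minimal sketch of toggling RX activity while leaving everything else untouched:

	static void example_vport_rx_active(struct vport_update_ramrod_data *p,
					    uint8_t vport, uint8_t active)
	{
		memset(p, 0, sizeof(*p));
		p->common.vport_id = vport;
		p->common.update_rx_active_flg = 1;	/* apply rx_active_flg */
		p->common.rx_active_flg = active;
		/* All other update_*_flg bytes stay 0, so their paired
		 * settings are left as-is. */
	}
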
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+#define VF_MAX_STATIC 192 /* In case of K2 */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* QED_SECTION_SIZE calculates the size in bytes out of offsize */
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section.
+ */
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC address of a section's offsize word.
+ * Use offsetof, since OFFSETUP collides with the firmware definition.
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
+ offsetof(struct \
+ mcp_public_data, \
+ sections[_section]))
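+
+/* Usage sketch (illustrative only, not part of the HSI): read a section's
+ * offsize word and resolve an element address with the macros above; the
+ * read helper named here is an assumption.
+ *
+ *	u32 offsize = qed_rd(p_hwfn, p_ptt,
+ *			     SECTION_OFFSIZE_ADDR(pub_base, PUBLIC_PORT));
+ *	u32 port_addr = SECTION_ADDR(offsize, port_id);
+ *	// expands to MCP_REG_SCRATCH + ((offsize & 0xffff) << 2) +
+ *	//            ((((offsize >> 16) & 0xffff) << 2) * port_id)
+ */
+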
+/* PHY configuration */
+struct pmm_phy_cfg {
+ u32 speed;
+#define PMM_SPEED_AUTONEG 0
+
+ u32 pause; /* bitmask */
+#define PMM_PAUSE_NONE 0x0
+#define PMM_PAUSE_AUTONEG 0x1
+#define PMM_PAUSE_RX 0x2
+#define PMM_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define PMM_LOOPBACK_NONE 0
+#define PMM_LOOPBACK_INT_PHY 1
+#define PMM_LOOPBACK_EXT_PHY 2
+#define PMM_LOOPBACK_EXT 3
+#define PMM_LOOPBACK_MAC 4
+
+ /* features */
+ u32 feature_config_flags;
+};
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle.
+ * MUST be kept in sync with struct pmm_stats_map.
+ */
+struct pmm_stats {
+ u64 r64; /* 0x00 (Offset 0x00) RX 64-byte frame counter */
+ u64 r127; /* 0x01 (Offset 0x08) RX 65 to 127 byte frame counter */
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs; /* 0x0F (Offset 0x58) RX FCS error frame counter */
+ u64 rxcf; /* 0x10 (Offset 0x60) RX control frame counter */
+ u64 rxpf; /* 0x11 (Offset 0x68) RX pause frame counter */
+ u64 rxpp; /* 0x12 (Offset 0x70) RX PFC frame counter */
+ u64 raln; /* 0x16 (Offset 0x78) RX alignment error counter */
+ u64 rfcr; /* 0x19 (Offset 0x80) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88) RX oversized frame counter */
+ u64 rjbr; /* 0x1B (Offset 0x90) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8) TX 64-byte frame counter */
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf; /* 0x50 (Offset 0xf8) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct pmm_stats pmm;
+};
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+/**************************************
+* LLDP and DCBX HSI structures
+**************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+
+enum lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC 4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
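+
+/* Field-extraction sketch (illustrative, not part of the HSI): pull the
+ * protocol ID and priority map out of an app table entry with the masks
+ * above.
+ *
+ *	u16 proto = (entry & DCBX_APP_PROTOCOL_ID_MASK) >>
+ *		    DCBX_APP_PROTOCOL_ID_SHIFT;
+ *	u8 pri_map = (entry & DCBX_APP_PRI_MAP_MASK) >> DCBX_APP_PRI_MAP_SHIFT;
+ */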
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+/* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+ u32 max_path;
+#define MAX_PATH_BIG_BEAR 2
+#define MAX_PATH_K2 1
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Shared Memory 2 Region *
+****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: iov_dis_ack */
+/* To maintain endianness in the mailbox HSI, we want to keep using u32. */
+/* The fw must have the VF right after the PF, since this is how it */
+/* accesses arrays (it always expects the VF to reside after the PF, */
+/* which makes the calculation much easier for it). */
+/* To satisfy both limitations and keep the struct small, the code will */
+/* abuse the structure defined here to achieve the actual partitioning */
+/* above. */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
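+
+/* Decoding sketch (illustrative): splitting process_kill into its counter
+ * and AEU-bit halves using the masks above.
+ *
+ *	u16 count = (process_kill & PROCESS_KILL_COUNTER_MASK) >>
+ *		    PROCESS_KILL_COUNTER_SHIFT;
+ *	u16 aeu_bit = (process_kill & PROCESS_KILL_GLOB_AEU_BIT_MASK) >>
+ *		      PROCESS_KILL_GLOB_AEU_BIT_SHIFT;
+ */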
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Driver <-> FW Mailbox *
+****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
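+
+/* Decoding sketch (illustrative): the speed/duplex value above is a 4-bit
+ * field, not a bitmask, so compare it after masking.
+ *
+ *	if (link_status & LINK_STATUS_LINK_UP) {
+ *		u32 spd = link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK;
+ *		if (spd == LINK_STATUS_SPEED_AND_DUPLEX_40G)
+ *			; // link is up at 40G
+ *	}
+ */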
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN BIT(0)
+#define LFA_FORCE_INIT BIT(1)
+#define LFA_LOOPBACK_MISMATCH BIT(2)
+#define LFA_SPEED_MISMATCH BIT(3)
+#define LFA_FLOW_CTRL_MISMATCH BIT(4)
+#define LFA_ADV_SPEED_MISMATCH BIT(5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
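+
+/* Decoding sketch (illustrative): lfa_status packs a reason bitmap and two
+ * counters.
+ *
+ *	u8 reason = (lfa_status & LFA_LINK_FLAP_REASON_MASK) >>
+ *		    LFA_LINK_FLAP_REASON_OFFSET;
+ *	u8 flaps = (lfa_status & LINK_FLAP_COUNT_MASK) >>
+ *		   LINK_FLAP_COUNT_OFFSET;
+ */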
+
+ u32 link_change_count;
+
+ /* LLDP params */
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+};
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 reserved[8];
+
+ u32 config;
+
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+ /* MINBW, MAXBW */
+ /* value range - 0..100, increments in 1 % */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+ u32 preserve_data; /* Will be used by CCM */
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+};
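+
+/* Composition sketch (illustrative): a Linux driver would build drv_id from
+ * the fields above; pda_comp_ver is a placeholder value here.
+ *
+ *	u32 drv_id = (pda_comp_ver << DRV_ID_PDA_COMP_VER_SHIFT) |
+ *		     DRV_ID_MCP_HSI_VER_CURRENT |
+ *		     DRV_ID_DRV_TYPE_LINUX;
+ */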
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Each driver request to set feature parameters is done using a
+ * different command, which is linked to a specific data structure
+ * from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+union drv_union_data {
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
+
+ struct pmm_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+ /* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+ /* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+/* configure VF MSIX params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
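+
+/* Handshake sketch (illustrative; the write/read helpers and the seq
+ * counter are assumptions): a command is a code plus a sequence number in
+ * drv_mb_header, and the MFW echoes the sequence in fw_mb_header along
+ * with a response code.
+ *
+ *	u32 hdr = DRV_MSG_CODE_LOAD_REQ | (++seq & DRV_MSG_SEQ_NUMBER_MASK);
+ *	write32(&mb->drv_mb_header, hdr);
+ *	while ((read32(&mb->fw_mb_header) & FW_MSG_SEQ_NUMBER_MASK) !=
+ *	       (seq & DRV_MSG_SEQ_NUMBER_MASK))
+ *		; // poll, with a timeout in real code
+ *	u32 resp = read32(&mb->fw_mb_header) & FW_MSG_CODE_MASK;
+ */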
+
+/* MFW - DRV MB */
+/**********************************************************************
+* Description
+* Incremental Aggregative
+* 8-bit MFW counter per message
+* 8-bit ack-counter per message
+* Capabilities
+* Provides up to 256 aggregative messages per type
+* Provides 4 message types per dword
+* Message type maps to a byte offset
+* Backward compatibility by using sizeof for the counters.
+* No lock required for 32-bit messages
+* Limitations:
+* In case of messages greater than 32 bits, a dedicated mechanism
+* (e.g. a lock) is required to prevent data corruption.
+**********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
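+
+/* Pending-message sketch (illustrative): a message is outstanding when its
+ * 8-bit counter differs from the corresponding ack counter.
+ *
+ *	u32 dw = MFW_DRV_MSG_DWORD(msg_id);
+ *	bool pending = (mb->msg[dw] ^ mb->ack[dw]) & MFW_DRV_MSG_MASK(msg_id);
+ */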
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+struct mcp_public_data {
+ /* The sections field is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+ struct drv_ver_info_stc drv_info;
+};
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
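+
+/* Reassembly sketch (illustrative): the 48-bit MAC address is split across
+ * the two words above, upper 16 bits in mac_addr_hi and lower 32 bits in
+ * mac_addr_lo.
+ *
+ *	u64 mac = ((u64)(mac_addr_hi & NVM_CFG_MAC_ADDRESS_HI_MASK) << 32) |
+ *		  mac_addr_lo;
+ */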
+
+/******************************************
+* nvm_cfg1 structs
+******************************************/
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+
+ u32 engineering_change[3]; /* 0x4 */
+
+ u32 manufacturing_id; /* 0x10 */
+
+ u32 serial_number[4]; /* 0x14 */
+
+ u32 pcie_cfg; /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+
+ u32 mgmt_traffic; /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
+#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+
+ u32 core_cfg; /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+
+ u32 e_lane_cfg1; /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 e_lane_cfg2; /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET 12
+#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+
+ u32 f_lane_cfg1; /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 f_lane_cfg2; /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+
+ u32 eagle_preemphasis; /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 eagle_driver_current; /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 falcon_driver_current; /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 pci_id; /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+
+ u32 pci_subsys_id; /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+
+ u32 bar; /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+
+ u32 eagle_txfir_main; /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 eagle_txfir_post; /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 falcon_txfir_main; /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 falcon_txfir_post; /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 manufacture_ver; /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+
+ u32 manufacture_time; /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+
+ u32 led_global_settings; /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+
+ u32 generic_cont1; /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+
+ u32 mbi_version; /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date; /* 0x80 */
+
+ u32 misc_sig; /* 0x84 */
+
+ /* Define the GPIO mapping to switch i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+
+ u32 reserved[46]; /* 0x88 */
+};
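+
+/* All NVM config fields above follow the same MASK/OFFSET convention; an
+ * illustrative (not driver-provided) read of a field would look like:
+ *
+ *	u32 mbi0 = (glob->mbi_version & NVM_CFG1_GLOB_MBI_VERSION_0_MASK) >>
+ *		   NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET;
+ */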
+
+struct nvm_cfg1_path {
+ u32 reserved[30]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 power_dissipated; /* 0x0 */
+#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
+
+ u32 power_consumed; /* 0x4 */
+#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
+
+ u32 generic_cont0; /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+
+ u32 pcie_cfg; /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+
+ u32 features; /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+
+ u32 speed_cap_mask; /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
+
+ u32 link_settings; /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
+
+ u32 phy_cfg; /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10
+#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
+
+ u32 mgmt_traffic; /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2
+
+ u32 ext_phy; /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+
+ u32 mba_cfg1; /* 0x28 */
+#define NVM_CFG1_PORT_MBA_MASK 0x00000001
+#define NVM_CFG1_PORT_MBA_OFFSET 0
+#define NVM_CFG1_PORT_MBA_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_ENABLED 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED5_2K 0x1
+#define NVM_CFG1_PORT_RESERVED5_4K 0x2
+#define NVM_CFG1_PORT_RESERVED5_8K 0x3
+#define NVM_CFG1_PORT_RESERVED5_16K 0x4
+#define NVM_CFG1_PORT_RESERVED5_32K 0x5
+#define NVM_CFG1_PORT_RESERVED5_64K 0x6
+#define NVM_CFG1_PORT_RESERVED5_128K 0x7
+#define NVM_CFG1_PORT_RESERVED5_256K 0x8
+#define NVM_CFG1_PORT_RESERVED5_512K 0x9
+#define NVM_CFG1_PORT_RESERVED5_1M 0xA
+#define NVM_CFG1_PORT_RESERVED5_2M 0xB
+#define NVM_CFG1_PORT_RESERVED5_4M 0xC
+#define NVM_CFG1_PORT_RESERVED5_8M 0xD
+#define NVM_CFG1_PORT_RESERVED5_16M 0xE
+#define NVM_CFG1_PORT_RESERVED5_32M 0xF
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21
+
+ u32 mba_cfg2; /* 0x2C */
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0
+#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000
+#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16
+
+ u32 vf_cfg; /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED6_4K 0x1
+#define NVM_CFG1_PORT_RESERVED6_8K 0x2
+#define NVM_CFG1_PORT_RESERVED6_16K 0x3
+#define NVM_CFG1_PORT_RESERVED6_32K 0x4
+#define NVM_CFG1_PORT_RESERVED6_64K 0x5
+#define NVM_CFG1_PORT_RESERVED6_128K 0x6
+#define NVM_CFG1_PORT_RESERVED6_256K 0x7
+#define NVM_CFG1_PORT_RESERVED6_512K 0x8
+#define NVM_CFG1_PORT_RESERVED6_1M 0x9
+#define NVM_CFG1_PORT_RESERVED6_2M 0xA
+#define NVM_CFG1_PORT_RESERVED6_4M 0xB
+#define NVM_CFG1_PORT_RESERVED6_8M 0xC
+#define NVM_CFG1_PORT_RESERVED6_16M 0xD
+#define NVM_CFG1_PORT_RESERVED6_32M 0xE
+#define NVM_CFG1_PORT_RESERVED6_64M 0xF
+
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+
+ u32 led_port_settings; /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
+
+ u32 transceiver_00; /* 0x40 */
+
+	/* GPIO mapping for the transceiver module-absent signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Define the GPIO mux settings to switch i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+
+ u32 reserved[133]; /* 0x44 */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+
+ u32 rsrv1; /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+
+ u32 rsrv2; /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+
+ u32 device_id; /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16
+
+ u32 cmn_cfg; /* 0x14 */
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+
+ u32 pci_cfg; /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+
+ u32 reserved[9]; /* 0x2C */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+* nvm_cfg structs
+******************************************/
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
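+
+/* TO_OFFSIZE() packs a byte offset and a byte size, each converted to dword
+ * units by the >> 2, into one u32 via the OFFSIZE_*_SHIFT constants (defined
+ * elsewhere). E.g., assuming OFFSIZE_OFFSET_SHIFT == 0 and
+ * OFFSIZE_SIZE_SHIFT == 16, TO_OFFSIZE(0x40, 0x100) would evaluate to
+ * 0x10 | (0x40 << 16).
+ */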
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define NVM_CRC_SIZE (sizeof(u32))
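+
+/* 0xEDB88320 is the bit-reflected form of the standard CRC-32 polynomial,
+ * and 0xDEBB20E3 is the well-known residue left in the (pre-complement)
+ * CRC register after running it over a buffer plus its appended CRC.
+ */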
+
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+/****************************************************************************
+* Boot Strap Region *
+****************************************************************************/
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len; /* boot code length (in dwords) */
+ u32 code_start_addr;
+ u32 crc; /* 32-bit CRC */
+};
+
+/****************************************************************************
+* Directories Region *
+****************************************************************************/
+struct nvm_code_entry {
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+ u32 len; /* Include CRC */
+ u32 sram_start_addr;
+ u32 sram_run_addr; /* Relevant in case of MIM only */
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 200
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+
+#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ (_num_images - \
+ 1) * sizeof(struct nvm_code_entry) + \
+ NVM_CRC_SIZE)
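+
+/* NVM_DIR_SIZE() subtracts the one code[] entry already counted inside
+ * struct nvm_dir and appends the trailing CRC, so e.g. a directory with
+ * 3 images occupies sizeof(struct nvm_dir) +
+ * 2 * sizeof(struct nvm_code_entry) + NVM_CRC_SIZE bytes.
+ */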
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ /* This array length depends on the number of VPD fields */
+ u8 vpd_data[1];
+};
+
+/****************************************************************************
+* NVRAM FULL MAP *
+****************************************************************************/
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE	(FLASH_PAGE_SIZE)		/* 4 KB */
+#define ASIC_MIM_MAX_SIZE	(300 * FLASH_PAGE_SIZE)		/* 1.2 MB */
+#define FPGA_MIM_MAX_SIZE	(25 * FLASH_PAGE_SIZE)		/* 100 KB */
+
+#define LIM_MAX_SIZE ((2 * \
+ FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) - \
+ NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
+ FPGA_MIM_MAX_SIZE)
+#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
+ ((idx == \
+ NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
+#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
+ MIM_MAX_SIZE(is_asic) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+/* Address
+ * +-------------------+ 0x000000
+ * | Bootstrap: |
+ * | magic_number |
+ * | sram_start_addr |
+ * | code_len |
+ * | code_start_addr |
+ * | crc |
+ * +-------------------+ 0x000014
+ * | rsrv |
+ * +-------------------+ 0x000040
+ * | LIM |
+ * +-------------------+ 0x002000
+ * | Dir1 |
+ * +-------------------+ 0x003000
+ * | Dir2 |
+ * +-------------------+ 0x004000
+ * | MIM1 |
+ * +-------------------+ 0x130000
+ * | MIM2 |
+ * +-------------------+ 0x25C000
+ * | Rest Images: |
+ * | TIM1/2 |
+ * | MFW_TRACE1/2 |
+ * | Eagle/Falcon FW |
+ * | PCIE/AVS FW |
+ * | MBA/CCM/L2B |
+ * | VPD |
+ * | optic_modules |
+ * | ... |
+ * +-------------------+ 0x400000
+ */
+struct nvm_image {
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+ /* NVM Offset (size) */
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+
+ /* MIM1_IMAGE 0x004000 (0x12c000) */
+ /* MIM2_IMAGE 0x130000 (0x12c000) */
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+}; /* 0x134 */
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
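+
+/* NVM_OFFSET() is the classic hand-rolled offsetof() idiom; per the map
+ * above it yields 0x40 for lim_image and 0x2000 for dir.
+ */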
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+
+ /* This array length depends on the no_hw_sets */
+ struct hw_set_info hw_sets[1];
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644
index 0000000..ffa9927
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -0,0 +1,776 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
+
+struct qed_ptt {
+ struct list_head list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+};
+
+struct qed_ptt_pool {
+ struct list_head free_list;
+ spinlock_t lock; /* ptt synchronized access */
+ struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+ GFP_ATOMIC);
+ int i;
+
+ if (!p_pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ if (i >= RESERVED_PTT_MAX)
+ list_add(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ spin_lock_init(&p_pool->lock);
+
+ return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+ }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ unsigned int i;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+ struct qed_ptt, list_entry);
+ list_del(&p_ptt->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+ return p_ptt;
+ }
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+ usleep_range(1000, 2000);
+ }
+
+ DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+ return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+ list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+	/* The HW works in dword units; translate the offset to bytes */
+ return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+	/* Update the PTT entry in the admin window */
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+	/* The HW works in dword units, while the address is in bytes */
+ p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, offset),
+ le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, u32 val)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+ u32 val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+ return val;
+}
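+
+/* A typical windowed register access (sketch only; hw_addr is a caller's
+ * GRC address, error handling elided):
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		u32 val = qed_rd(p_hwfn, p_ptt, hw_addr);
+ *
+ *		qed_wr(p_hwfn, p_ptt, hw_addr, val);
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}
+ */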
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr,
+ size_t n,
+ bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ size_t quota, done = 0;
+ u32 __iomem *reg_addr;
+
+ while (done < n) {
+ quota = min_t(size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+ done += quota;
+ }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest, u32 hw_addr, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, void *src, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+	/* Every pretend undoes previous pretends, including
+	 * any previous port pretend.
+	 */
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct qed_dmae_params *p_params)
+{
+ u32 opcode = 0;
+ u16 opcodeB = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) <<
+ DMAE_CMD_SRC_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT);
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) <<
+ DMAE_CMD_DST_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT);
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+ opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+ opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+ opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+ /* reset source address in next go */
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ /* reset dest address in next go */
+ opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+ DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+ opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+
+ opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
+ DMAE_CMD_DST_VF_ID_SHIFT);
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ int qed_status = 0;
+
+	/* verify that neither the source nor the destination address is zero */
+ if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+ ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ DP_NOTICE(p_hwfn,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+	/* Copy the command to the DMAE - this has to be done before every
+	 * call, since the source/dest addresses are reset after each go.
+	 * The first 9 DWs are the command registers, the 10th DW is the
+	 * GO register, and the rest are result registers
+	 * (which are read-only by the client).
+	 */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)command) + i) : 0;
+
+ qed_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ qed_dmae_idx_to_go_cmd(idx_cmd),
+ DMAE_GO_VALUE);
+
+ return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_addr,
+ GFP_KERNEL);
+ if (!*p_comp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_addr, GFP_KERNEL);
+ if (!*p_cmd) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_addr, GFP_KERNEL);
+ if (!*p_buff) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ goto err;
+ }
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return 0;
+err:
+ qed_dmae_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+	/* Just make sure no DMAE operation is in mid-flight */
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys);
+ p_hwfn->dmae_info.p_completion_word = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys);
+ p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+ }
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+ u32 wait_cnt = 0;
+ u32 wait_cnt_limit = 10000;
+
+ int qed_status = 0;
+
+ barrier();
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ udelay(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ qed_status = -EBUSY;
+ break;
+ }
+
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ barrier();
+ }
+
+ if (qed_status == 0)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 length)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ int qed_status = 0;
+
+ switch (src_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+ break;
+ /* for virtual source addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(uintptr_t)src_addr,
+ length * sizeof(u32));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dst_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+ break;
+	/* for virtual destination addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd->length = cpu_to_le16((u16)length);
+
+ qed_dmae_post_command(p_hwfn, p_ptt);
+
+ qed_status = qed_dmae_operation_wait(p_hwfn);
+
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+ src_addr,
+ dst_addr,
+ length);
+ return qed_status;
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+ memcpy((void *)(uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length * sizeof(u32));
+
+ return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr, u64 dst_addr,
+ u8 src_type, u8 dst_type,
+ u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ int qed_status = 0;
+ u32 offset = 0;
+
+ qed_dmae_opcode(p_hwfn,
+ (src_type == QED_DMAE_ADDRESS_GRC),
+ (dst_type == QED_DMAE_ADDRESS_GRC),
+ p_params);
+
+ cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+	/* Split the transfer into chunks of at most DMAE_MAX_RW_SIZE dwords */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == QED_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status,
+ src_addr,
+ dst_addr,
+ length_cur);
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+	memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_GRC,
+				      size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *p_params)
+{
+ u16 pq_id = 0;
+
+ if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
+ !p_params) {
+ DP_NOTICE(p_hwfn,
+ "Protocol %d received NULL PQ params\n",
+ proto);
+ return 0;
+ }
+
+ switch (proto) {
+ case PROTOCOLID_CORE:
+ if (p_params->core.tc == LB_TC)
+ pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
+ case PROTOCOLID_ETH:
+ pq_id = p_params->eth.tc;
+ break;
+ default:
+ pq_id = 0;
+ }
+
+ pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+ return pq_id;
+}
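+
+/* Sketch of a caller resolving the PQ for an Ethernet queue on TC 0
+ * (parameter usage per union qed_qm_pq_params in qed_hw.h):
+ *
+ *	union qed_qm_pq_params pq_params = { .eth.tc = 0 };
+ *	u16 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ */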
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644
index 0000000..e56d433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -0,0 +1,263 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+
+#define DMAE_CMD_SIZE 14
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
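+
+/* Per qed_dmae_post_command() in qed_hw.c, the DMAE_CMD_SIZE registers of
+ * a channel break down as DMAE_CMD_SIZE_TO_FILL (9) command dwords, one GO
+ * register and four read-only result registers.
+ */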
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return int - success (0), negative - error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free -
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param new_hw_addr
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ size_t n);
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of the pxp_pretend structure. Can contain
+ *        either a PF or a VF; the port/path fields are don't-care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map the idx to the DMAE 'go' command
+ * register address. Declared here since other files require it.
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *params);
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *fw_data);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644
index 0000000..0b21a55
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -0,0 +1,798 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+ MCM_SEC,
+ MCM_PRI,
+ UCM_SEC,
+ UCM_PRI,
+ TCM_SEC,
+ TCM_PRI,
+ YCM_SEC,
+ YCM_PRI,
+ XCM_SEC,
+ XCM_PRI,
+ NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, \
+ 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
+ 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID 0xffff
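+/* Worked example: a PQ with 100 elements needs (100 + 1) * 4 = 404 bytes,
+ * so QM_PQ_MEM_4KB(100) rounds up to 1 page, while QM_PQ_SIZE_256B(100)
+ * is DIV_ROUND_UP(100, 0x100) - 1 = 0.
+ */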
+/* feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL 4375000
+#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+/* RL constants */
+#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_PERIOD 5 /* in us */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) max_t(u32, \
+ (((rate ? rate : 1000000) \
+ * QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL 4375000
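+/* E.g. with the 5 us QM_RL_PERIOD, a rate of 1000000 (also the fallback
+ * used for rate 0) gives QM_RL_INC_VAL = (1000000 * 5) / 8 = 625000.
+ */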
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define EAGLE_WORKAROUND_TC 7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
+ 4) * \
+ 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_EAGLE_WORKAROUND_BLOCKS 4
+#define BTB_PURE_LB_FACTOR 10
+#define BTB_PURE_LB_RATIO 7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 0x2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
+ _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, \
+ value) SET_FIELD(var[cmd ## _ ## field ## \
+ _OFFSET], \
+ cmd ## _ ## field, \
+ value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
+ (max_phy_tcs_pr_port) \
+ + (tc))
+#define LB_VOQ(port) ( \
+ MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port) \
+ ((tc) < \
+ LB_TC ? PHYS_VOQ(port, \
+ tc, \
+ max_phy_tcs_pr_port) \
+ : LB_VOQ(port))
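+/* VOQ numbering example: with max_phy_tcs_pr_port == 4, port 1 / TC 2 maps
+ * to physical VOQ 1 * 4 + 2 = 6, while any TC >= LB_TC on port 1 maps to
+ * the pure-LB VOQ MAX_PHYS_VOQS + 1.
+ */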
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
+ bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ /* enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (1 << MAX_NUM_VOQS) - 1);
+ /* write RL period */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
+ bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
+ bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
+ bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 voq,
+ u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+
+	/* In A0, limit the size of the PBF queue so that it holds at most
+	 * 511 commands of the minimum size of 4 (the FCoE minimum size).
+	 */
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+ if (is_bb_a0)
+ cmdq_lines = min_t(u32, cmdq_lines, 1022);
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id;
+
+ /* clear PBF lines for all VOQs */
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active) {
+ u16 phys_lines, phys_lines_per_tc;
+ u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* find #lines to divide between the active
+ * physical TCs.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+ /* find #lines per active physical TC */
+ phys_lines_per_tc = phys_lines / phys_tcs;
+ /* init registers per active TC */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+ /* init registers for pure LB TC */
+ qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+ }
+}
+
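+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * active ports' physical and pure-LB TCs
+ */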
+static void qed_btb_blocks_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u32 temp;
+ u8 phys_tcs;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* find blocks per physical TC. use a factor to avoid
+ * floating-point arithmetic.
+ */
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (phys_tcs * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+
+ /* init physical TCs */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+
+ /* init pure LB TC */
+ temp = LB_VOQ(port_id);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ pure_lb_blocks);
+ }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+ u16 i, pq_id, pq_group;
+
+ /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
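+ /* e.g. with a 32-bit mask width, mask dword i covers PQs
+ * [32 * i, 32 * i + 31]
+ */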
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(p_params->pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+ /* go over all Tx PQs */
+ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ bool is_vf_pq = (i >= p_params->num_pf_pqs);
+ struct qm_rf_pq_map tx_pq_map;
+
+ /* update first Tx PQ of VPORT/TC */
+ u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+ p_params->start_vport;
+ u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+ u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ /* create new VP PQ */
+ pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+ /* map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id,
+ (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id <<
+ QM_WFQ_VP_PQ_PF_SHIFT));
+ }
+ /* fill PQ map entry */
+ memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+ is_vf_pq ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+ is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+ p_params->pq_params[i].wrr_group);
+ /* write PQ map entry to CAM */
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+ *((u32 *)&tx_pq_map));
+ /* set base address */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ /* check if VF PQ */
+ if (is_vf_pq) {
+ /* if PQ is associated with a VF, add indication
+ * to PQ VF mask
+ */
+ tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+ (1 << (pq_id % tx_pq_vf_mask_width));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++) {
+ if (tx_pq_vf_mask[i]) {
+ if (is_bb_a0) {
+ u32 curr_mask = 0, addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
+ if (!p_params->is_first_pf)
+ curr_mask = qed_rd(p_hwfn, p_ptt,
+ addr);
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+
+ STORE_RT_REG(p_hwfn, addr,
+ curr_mask | tx_pq_vf_mask[i]);
+ } else {
+ u32 addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
+ }
+ }
+ }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
+{
+ u16 i, pq_id;
+
+ /* a single other PQ group is used in each PF,
+ * where PQ group i is used in PF i.
+ */
+ u16 pq_group = pf_id;
+ u32 pq_size = num_pf_cids + num_tids;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+ /* set base address */
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u32 crd_reg_offset;
+ u32 inc_val;
+ u16 i;
+
+ if (p_params->pf_id < MAX_NUM_PFS_BB)
+ crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+ else
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
+
+ inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+
+ OVERWRITE_RT_REG(p_hwfn,
+ crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 tc, i, vport_id;
+ u32 inc_val;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
+ u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* each VPORT can have several VPORT PQ IDs for
+ * different TCs
+ */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = pq_ids[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ STORE_RT_REG(p_hwfn, temp + vport_pq_id,
+ QM_WFQ_UPPER_BOUND |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+ }
+ }
+
+ return 0;
+}
+
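+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */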
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ i++) {
+ udelay(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+ /* check for a timeout while waiting for SDM command ready */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Timeout when waiting for QM SDM command ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
+{
+ if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
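+/* i.e. the sum of the PF's own Tx PQ memory, its VFs' Tx PQ memory and the
+ * per-PF 'other' PQ memory, all in the 4KB units of the _4KB macros
+ */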
+
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
+{
+ /* init AFullOprtnstcCrdMask */
+ u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (p_params->pf_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (p_params->vport_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (p_params->pf_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (p_params->vport_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+ qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+ qed_cmdq_lines_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ qed_btb_blocks_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+ p_params->num_tids) *
+ QM_OTHER_PQS_PER_PF;
+ u8 tc, i;
+
+ /* clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < p_params->num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* map Other PQs (if any) */
+ qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+ p_params->num_pf_cids, p_params->num_tids, 0);
+
+ /* map Tx PQs */
+ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+ if (p_params->pf_wfq)
+ if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+ return -1;
+
+ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+ return -1;
+
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLPFCRD + pf_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+ return 0;
+}
+
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+ /* set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
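+ /* PQs are handled in chunks of QM_STOP_PQ_MASK_WIDTH, one command per
+ * chunk; e.g. with a 32-bit mask, stopping PQs 0-63 sends two commands
+ */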
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* if last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+ cmd_arr[0], cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644
index 0000000..796f139
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -0,0 +1,531 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
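+/* each non-zero entry is a window target in 4KB units, i.e. the window maps
+ * GRC address (entry << 12), as the per-entry comments above show
+ */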
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+ cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data[i].b_valid = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val)
+{
+ p_hwfn->rt_data[rt_offset].init_val = val;
+ p_hwfn->rt_data[rt_offset].b_valid = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data[rt_offset + i].init_val = val[i];
+ p_hwfn->rt_data[rt_offset + i].b_valid = true;
+ }
+}
+
+static void qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 rt_offset,
+ u32 size)
+{
+ struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++) {
+ if (!rt_data[i].b_valid)
+ continue;
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+ }
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rt_data *rt_data;
+
+ rt_data = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(*rt_data), GFP_ATOMIC);
+ if (!rt_data)
+ return -ENOMEM;
+
+ p_hwfn->rt_data = rt_data;
+
+ return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->rt_data);
+ p_hwfn->rt_data = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size,
+ const u32 *buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ int rc = 0;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+ const u32 *data = buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(buf + dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ /* invoke the DMAE host-to-GRC API with the REPL_SRC flag, so the
+ * single zeroed buffer is replicated across 'fill_count' dwords
+ * starting at 'addr'
+ */
+
+ return qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(&zero_buffer[0]),
+ addr, fill_count,
+ QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+ u32 offset, output_len, input_len, max_size;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ int rc = 0;
+ u32 size;
+
+ array_data = cdev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)(array_data +
+ dmae_array_offset);
+ data = le32_to_cpu(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data,
+ INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ memset(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = qed_unzip_data(p_hwfn, input_len,
+ (u8 *)&array_data[offset],
+ max_size, (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+ rc = -EINVAL;
+ }
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ union init_write_args *arg = &cmd->args;
+ int rc = 0;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn,
+ "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+ addr);
+ return -EINVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ qed_wr(p_hwfn, p_ptt, addr,
+ le32_to_cpu(arg->inline_val));
+ break;
+ case INIT_SRC_ZEROS:
+ if (b_must_dmae ||
+ (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ else
+ qed_init_fill(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ break;
+ case INIT_SRC_ARRAY:
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ qed_init_rt(p_hwfn, p_ptt, addr,
+ le16_to_cpu(arg->runtime.offset),
+ le16_to_cpu(arg->runtime.size));
+ break;
+ }
+
+ return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+ return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_read_op *cmd)
+{
+ u32 data = le32_to_cpu(cmd->op_data);
+ u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ data = le32_to_cpu(cmd->op_data);
+ if (GET_FIELD(data, INIT_READ_OP_POLL)) {
+ int i;
+
+ switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
+ case INIT_COMPARISON_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_COMPARISON_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_COMPARISON_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ comp_check = NULL;
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ data);
+ return;
+ }
+
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT &&
+ !comp_check(val, le32_to_cpu(cmd->expected_val));
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == QED_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, data);
+ }
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+ u16 *offset,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = cdev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
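+/* The modes tree is evaluated in prefix notation; for example, the sequence
+ * { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + a, MAX_INIT_MODE_OPS + b }
+ * yields 1 iff mode bits a and b are both set in 'modes'.
+ */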
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd,
+ int modes)
+{
+ u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+ if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
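+/* Returns the number of init commands to skip when the IF_PHASE condition
+ * does not match the current phase/phase_id, or 0 when it matches.
+ */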
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+ struct init_if_phase_op *p_cmd,
+ u32 phase,
+ u32 phase_id)
+{
+ u32 data = le32_to_cpu(p_cmd->phase_data);
+ u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+ int rc = 0;
+
+ num_init_ops = cdev->fw_data->init_ops_size;
+ init_ops = cdev->fw_data->init_ops;
+
+ p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ return -ENOMEM;
+ }
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = le32_to_cpu(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+ case INIT_OP_READ:
+ qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+ case INIT_OP_IF_MODE:
+ cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ phase, phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* qed_init_run is always invoked from
+ * sleep-able context
+ */
+ udelay(le32_to_cpu(cmd->delay.delay));
+ break;
+
+ case INIT_OP_CALLBACK:
+ qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ kfree(p_hwfn->unzip_buf);
+ return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *data)
+{
+ struct qed_fw_data *fw = cdev->fw_data;
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(cdev, "Invalid fw data\n");
+ return -EINVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)data;
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644
index 0000000..1e83204
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return int
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * @brief qed_init_alloc - Allocate the RT array and store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_free - Free the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief qed_init_store_rt_agg - Store an aggregate value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
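+/* e.g. STORE_RT_REG_AGG(p_hwfn, offset, sb_entry) stores a whole
+ * struct cau_sb_entry as consecutive u32 runtime entries
+ * (see qed_int_cau_conf_sb)
+ */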
+
+/**
+ * @brief
+ * Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644
index 0000000..2e399b6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -0,0 +1,1134 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+struct qed_pi_info {
+ qed_int_comp_cb_t comp_cb;
+ void *cookie;
+};
+
+struct qed_sb_sp_info {
+ struct qed_sb_info sb_info;
+
+ /* per protocol index data */
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct qed_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+ struct qed_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0;
+ u16 index;
+
+ /* Make certain the HW write took effect */
+ mmiowb();
+
+ index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = QED_SB_ATT_IDX;
+ }
+
+ /* Make certain we got a consistent view with HW */
+ mmiowb();
+
+ return rc;
+}
+
+/**
+ * @brief qed_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return 0;
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_mask;
+
+ if (deasserted_bits != 0x100)
+ DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+
+ /* Clear IGU indication for the deasserted bits */
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return 0;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u32 attn_bits = 0, attn_acks = 0;
+ u16 asserted_bits, deasserted_bits;
+ __le16 index;
+ int rc = 0;
+
+ /* Read current attention bits/acks - safeguard against attentions
+ * by guaranteeing work on a synchronized timeframe
+ */
+ do {
+ index = p_sb_attn->sb_index;
+ attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+ attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+ } while (index != p_sb_attn->sb_index);
+ p_sb_attn->sb_index = index;
+
+ /* Attention / Deassertion are meaningful (and in correct state)
+ * only when they differ and consistent with known state - deassertion
+ * when previous attention & current ack, and assertion when current
+ * attention with no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
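+ /* e.g. a bit that is set in attn_bits, clear in attn_acks and absent
+ * from known_attn is reported as newly asserted
+ */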
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ } else if (asserted_bits == 0x100) {
+ DP_INFO(p_hwfn,
+ "MFW indication via attention\n");
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication [deassertion]\n");
+ }
+
+ if (asserted_bits) {
+ rc = qed_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits) {
+ rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+ void __iomem *igu_addr,
+ u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+ /* Both segments (interrupts & acks) are written to same place address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ mmiowb();
+ barrier();
+}
+
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_pi_info *pi_info = NULL;
+ struct qed_sb_attn_info *sb_attn;
+ struct qed_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn) {
+ /* can't use DP_ERR here: p_hwfn (and thus its cdev) is NULL */
+ pr_err("qed: DPC called - no hwfn!\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->cdev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+ /* Disable ack for def status block. Required both for msix and for
+ * inta in non-mask mode; in inta it does no harm.
+ */
+ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+
+ rc = qed_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+ /* Check if we expect interrupts at this time; if not, just ack them */
+ if (!(rc & QED_SB_EVENT_MASK)) {
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ /* Check the validity of the DPC ptt; if not valid, ack interrupts and fail */
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & QED_SB_ATT_IDX)
+ qed_int_attentions(p_hwfn);
+
+ if (rc & QED_SB_IDX) {
+ int pi;
+
+ /* Invoke the registered protocol-index callbacks */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & QED_SB_ATT_IDX))
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (p_sb) {
+ if (p_sb->sb_attn)
+ dma_free_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb;
+ void *p_virt;
+ dma_addr_t p_phys = 0;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
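+/* with the fixed timer resolution of 1 (see qed_init_cau_sb_entry below),
+ * the default Rx timeout of 24 usec is programmed as timeset = 24 >> 2 = 6,
+ * since 6 << (1 + 1) = 24
+ */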
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ u32 cau_state;
+
+ memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+ /* setting the timer resolution to a fixed value (= 1) */
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+ QED_CAU_DEF_RX_TIMER_RES);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+ QED_CAU_DEF_TX_TIMER_RES);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_hwfn->cdev->rx_coalesce_usecs)
+ p_hwfn->cdev->rx_coalesce_usecs =
+ QED_CAU_DEF_RX_USECS;
+ if (!p_hwfn->cdev->tx_coalesce_usecs)
+ p_hwfn->cdev->tx_coalesce_usecs =
+ QED_CAU_DEF_TX_USECS;
+ }
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+ u32 val;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
+ upper_32_bits(sb_phys));
+
+ val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
+ (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 num_tc = 1, i;
+
+ qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ QED_COAL_RX_STATE_MACHINE,
+ timeset);
+
+ timeset = p_hwfn->cdev->tx_coalesce_usecs >>
+ (QED_CAU_DEF_TX_TIMER_RES + 1);
+
+ for (i = 0; i < num_tc; i++) {
+ qed_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ QED_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset;
+ u32 pi_offset;
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id return the
+ * igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ u16 igu_sb_id;
+
+ /* Assuming continuous set of IGU SBs dedicated for given PF */
+ if (sb_id == QED_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else
+ igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+ (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+ return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != QED_SP_SB_ID) {
+ p_hwfn->sbs_info[sb_id] = sb_info;
+ p_hwfn->num_sbs++;
+ }
+
+ sb_info->cdev = p_hwfn->cdev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+
+ sb_info->flags |= QED_SB_INFO_INIT;
+
+ qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_id == QED_SP_SB_ID) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return -EINVAL;
+ }
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+
+ return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (p_sb) {
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+ p_phys, QED_SP_SB_ID);
+
+ memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return 0;
+}
+
+static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ if (!p_hwfn)
+ return;
+
+ if (p_hwfn->p_sp_sb)
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup Slow path status block - NULL pointer\n");
+
+ if (p_hwfn->p_sb_attn)
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup attentions status block - NULL pointer\n");
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ qed_status = 0;
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+ qed_status = 0;
+ }
+
+ return qed_status;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+ p_hwfn->cdev->int_mode = int_mode;
+ switch (p_hwfn->cdev->int_mode) {
+ case QED_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case QED_INT_MODE_POLL:
+ break;
+ }
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ int i;
+
+ p_hwfn->b_int_enabled = 1;
+
+ /* Mask non-link attentions */
+ for (i = 0; i < 9; i++)
+ qed_wr(p_hwfn, p_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+ /* Configure AEU signal change to produce attentions for link */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+
+ /* Flush the writes to IGU */
+ mmiowb();
+
+ /* Unmask AEU signals toward IGU */
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid)
+{
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u32 data = 0;
+ u32 cmd_ctrl = 0;
+ u32 val = 0;
+ u32 sb_bit = 0;
+ u32 sb_bit_addr = 0;
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ barrier();
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* Flush the write to IGU */
+ mmiowb();
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (sb_id % 32);
+ sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+ /* Now wait for the command to complete */
+ do {
+ val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+
+ usleep_range(5000, 10000);
+ } while (--sleep_cnt);
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set)
+{
+ int pi;
+
+ /* Set */
+ if (b_set)
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+ /* Clear */
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath)
+{
+ u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 sb_id = 0;
+ u32 val = 0;
+
+ val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning SBs [%d,...,%d]\n",
+ igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+ for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+
+ if (b_slowpath) {
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+ }
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *blk;
+ u32 val;
+ u16 sb_id;
+ u16 prev_sb_id = 0xFF;
+
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+
+ if (!p_hwfn->hw_info.p_igu_info)
+ return -ENOMEM;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Initialize base sb / sb cnt for PFs */
+ p_igu_info->igu_base_sb = 0xffff;
+ p_igu_info->igu_sb_cnt = 0;
+ p_igu_info->igu_dsb_id = 0xffff;
+ p_igu_info->igu_base_sb_iov = 0xffff;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+
+ /* stop scanning when the first invalid PF entry is hit */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ break;
+
+ blk->status = QED_IGU_STATUS_VALID;
+ blk->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ blk->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
+ val, blk->function_id, blk->is_pf,
+ blk->vector_number);
+
+ if (blk->is_pf) {
+ if (blk->function_id == p_hwfn->rel_pf_id) {
+ blk->status |= QED_IGU_STATUS_PF;
+
+ if (blk->vector_number == 0) {
+ if (p_igu_info->igu_dsb_id == 0xffff)
+ p_igu_info->igu_dsb_id = sb_id;
+ } else {
+ if (p_igu_info->igu_base_sb ==
+ 0xffff) {
+ p_igu_info->igu_base_sb = sb_id;
+ } else if (prev_sb_id != sb_id - 1) {
+ DP_NOTICE(p_hwfn->cdev,
+ "consecutive igu vectors for HWFN %x broken",
+ p_hwfn->rel_pf_id);
+ break;
+ }
+ prev_sb_id = sb_id;
+ /* we don't count the default */
+ (p_igu_info->igu_sb_cnt)++;
+ }
+ }
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+
+ if (p_igu_info->igu_base_sb == 0xffff ||
+ p_igu_info->igu_dsb_id == 0xffff ||
+ p_igu_info->igu_sb_cnt == 0) {
+ DP_NOTICE(p_hwfn,
+ "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = 0;
+
+ igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+ u64 intr_status = 0;
+ u32 intr_status_lo = 0;
+ u32 intr_status_hi = 0;
+ u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+ u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ lsb_igu_cmd_addr * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ msb_igu_cmd_addr * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+ tasklet_init(p_hwfn->sp_dpc,
+ qed_int_sp_dpc, (unsigned long)p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+ if (!p_hwfn->sp_dpc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ rc = qed_int_sp_dpc_alloc(p_hwfn);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+ rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+ rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+ return rc;
+ }
+ return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+ qed_int_sp_sb_free(p_hwfn);
+ qed_int_sb_attn_free(p_hwfn);
+ qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sp_dpc_setup(p_hwfn);
+}
+
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks)
+{
+ struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info)
+ return 0;
+
+ if (p_iov_blks)
+ *p_iov_blks = info->free_blks;
+
+ return info->igu_sb_cnt;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644
index 0000000..16b5751
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -0,0 +1,391 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+
+/* IGU control commands */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
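+
+/* Sketch of the ctrl_data layout implied by the masks above:
+ * bits 0..15  - FID (opaque fid)
+ * bits 16..27 - PXP_ADDR (command address)
+ * bit 28      - reserved
+ * bit 31      - TYPE (enum igu_ctrl_cmd, rd/wr)
+ */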
+
+enum qed_coalescing_fsm {
+ QED_COAL_RX_STATE_MACHINE,
+ QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param coalescing_fsm
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * Once the structure is initialized, it can be passed to sb-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
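+
+/* Usage sketch (hypothetical caller, error handling elided); the status
+ * block memory is expected to be DMA-coherent, e.g.:
+ *
+ *   sb_virt = dma_alloc_coherent(&pdev->dev,
+ *                                sizeof(struct status_block),
+ *                                &sb_phys, GFP_KERNEL);
+ *   rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info,
+ *                        sb_virt, sb_phys, sb_id);
+ */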
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * Once the structure is released, its memory can be freed.
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param hwfn_cookie - the hwfn pointer, cast to an unsigned long
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ * blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_iov_blks - configured free blks for vfs
+ *
+ * @return int - number of status blocks configured
+ */
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks);
+
+/**
+ * @file
+ *
+ * @brief Interrupt handler
+ */
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX 0x0001
+#define QED_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+ u8 status;
+#define QED_IGU_STATUS_FREE 0x01
+#define QED_IGU_STATUS_VALID 0x02
+#define QED_IGU_STATUS_PF 0x04
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+};
+
+struct qed_igu_map {
+ struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+ struct qed_igu_map igu_map;
+ u16 igu_dsb_id;
+ u16 igu_base_sb;
+ u16 igu_base_sb_iov;
+ u16 igu_sb_cnt;
+ u16 igu_sb_cnt_iov;
+ u16 free_blks;
+};
+
+/* TODO: names of these functions may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief qed_int_register_cb - Register a callback function for the
+ * slowhwfn status block.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons);
+
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+ u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param cleanup_set - set(1) / clear(0)
+ * @param opaque_fid - the function for which to perform
+ * cleanup, for example a PF on behalf of
+ * its VFs.
+ */
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param cleanup_set - set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ */
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644
index 0000000..f72036a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -0,0 +1,1704 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+enum qed_rss_caps {
+ QED_RSS_IPV4 = 0x1,
+ QED_RSS_IPV6 = 0x2,
+ QED_RSS_IPV4_TCP = 0x4,
+ QED_RSS_IPV6_TCP = 0x8,
+ QED_RSS_IPV4_UDP = 0x10,
+ QED_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_approx_mcast_flg;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+};
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_fid,
+ u16 opaque_fid,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ struct qed_sp_init_request_params params;
+ struct vport_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+ u16 rx_mode = 0;
+ u8 abs_vport_id = 0;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = cpu_to_le16(mtu);
+ p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
+ p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+ /* TPA related fields */
+ memset(&p_ramrod->tpa_param, 0,
+ sizeof(struct eth_vport_tpa_param));
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+ concrete_fid);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_rss_params *p_params)
+{
+ struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+ u16 abs_l2_queue = 0, capabilities = 0;
+ int rc = 0, i;
+
+ if (!p_params) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+ if (rc)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+ rss->update_rss_capabilities = p_params->update_rss_capabilities;
+ rss->update_rss_ind_table = p_params->update_rss_ind_table;
+ rss->update_rss_key = p_params->update_rss_key;
+
+ rss->rss_mode = p_params->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
+
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+ rss->tbl_size = p_params->rss_table_size_log;
+
+ rss->capabilities = cpu_to_le16(capabilities);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ rss->rss_mode, rss->update_rss_capabilities,
+ capabilities, rss->update_rss_ind_table,
+ rss->update_rss_key);
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ rc = qed_fw_l2_queue(p_hwfn,
+ (u8)p_params->rss_ind_table[i],
+ &abs_l2_queue);
+ if (rc)
+ return rc;
+
+ rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+ i, rss->indirection_table[i]);
+ }
+
+ for (i = 0; i < QED_RSS_KEY_SIZE; i++)
+ rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+ return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->rx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->rx_mode.state = 0x%x\n", state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->tx_mode.state = 0x%x\n", state);
+ }
+}
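+
+/* Example derived from the field logic above: an rx_accept_filter of
+ * QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_BCAST clears UCAST_DROP_ALL,
+ * leaves UCAST_ACCEPT_UNMATCHED clear, sets MCAST_DROP_ALL and sets
+ * BCAST_ACCEPT_ALL - i.e. accept matched unicast and broadcast only.
+ */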
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sp_vport_update_params *p_params)
+{
+ int i;
+
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (p_params->update_approx_mcast_flg) {
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+ __le32 val = cpu_to_le32(p_bins[i]);
+
+ p_ramrod->approx_mcast.bins[i] = val;
+ }
+ }
+}
+
+static int
+qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct qed_sp_init_request_params sp_params;
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_params->opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+ rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc) {
+ /* Return spq entry which is taken in qed_sp_init_request()*/
+ qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 vport_id)
+{
+ struct qed_sp_init_request_params sp_params;
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+ u8 vport,
+ struct qed_filter_accept_flags accept_flags,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_sp_vport_update_params vport_update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+ }
+
+ return 0;
+}
+
+static int qed_sp_release_queue_cid(struct qed_hwfn *p_hwfn,
+ struct qed_hw_cid_data *p_cid_data)
+{
+ if (!p_cid_data->b_cid_allocated)
+ return 0;
+
+ qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+ p_cid_data->b_cid_allocated = false;
+
+ return 0;
+}
+
+static int
+qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ /* Store information for the stop */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = params->vport_id;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+ if (rc != 0)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, cid, params->queue_id, params->vport_id,
+ params->sb);
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ cid, opaque_fid,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(params->sb);
+ p_ramrod->sb_index = params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
+ p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
+ p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ return rc;
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ struct qed_hw_cid_data *p_rx_cid;
+ u64 init_prod_val = 0;
+ u16 abs_l2_queue = 0;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+ if (rc != 0)
+ return rc;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)(&init_prod_val));
+
+ /* Allocate a CID for the queue */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_rx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_rx_cid->b_cid_allocated = true;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_rx_cid->cid,
+ params,
+ abs_stats_id,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size);
+
+ if (rc != 0)
+ qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u16 abs_rx_q_id = 0;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_rx_cid->cid,
+ p_rx_cid->opaque_fid,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to arrive as an EQE to the PF.
+ */
+ p_ramrod->complete_cqe_flg =
+ (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+ !eq_completion_only) || cqe_completion;
+ p_ramrod->complete_event_flg =
+ !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+ eq_completion_only;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+static int
+qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_tx_cid;
+ u8 abs_vport_id;
+ int rc = -EINVAL;
+ u16 pq_id;
+
+ /* Store information for the stop */
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ p_tx_cid->cid = cid;
+ p_tx_cid->opaque_fid = opaque_fid;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
+ opaque_fid,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->tc = p_pq_params->eth.tc;
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+ p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+ pq_id = qed_get_qm_pq(p_hwfn,
+ PROTOCOLID_ETH,
+ p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hw_cid_data *p_tx_cid;
+ union qed_qm_pq_params pq_params;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
+ return rc;
+
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+ memset(&pq_params, 0, sizeof(pq_params));
+
+ /* Allocate a CID for the queue */
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_tx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_tx_cid->b_cid_allocated = true;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, p_tx_cid->cid,
+ p_params->queue_id, p_params->vport_id, p_params->sb);
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_tx_cid->cid,
+ p_params,
+ abs_stats_id,
+ pbl_addr,
+ pbl_size,
+ &pq_params);
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+ if (rc)
+ qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id)
+{
+ struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_tx_cid->cid,
+ p_tx_cid->opaque_fid,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case QED_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case QED_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case QED_FILTER_REPLACE:
+ case QED_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REPLACE;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
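+
+/* For illustration: a MAC of 00:0e:1e:aa:bb:cc lands in the FW fields
+ * with each byte pair swapped, so when read as little-endian 16-bit
+ * words: msb = 0x000e, mid = 0x1eaa, lsb = 0xbbcc.
+ */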
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct qed_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct qed_sp_init_request_params sp_params;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ enum eth_filter_action action;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(**pp_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, pp_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+ switch (p_filter_cmd->opcode) {
+ case QED_FILTER_FLUSH:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+ case QED_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case QED_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+ case QED_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+ case QED_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+ case QED_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+ case QED_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+ case QED_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+ case QED_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case QED_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+ case QED_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+ qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else {
+ action = qed_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return -EINVAL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id = (p_filter_cmd->opcode ==
+ QED_FILTER_REMOVE) ?
+ vport_to_remove_from :
+ vport_to_add_to;
+ }
+
+ return 0;
+}
+
+static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct eth_filter_cmd_header *p_header;
+ int rc;
+
+ rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "Unicast filter ADD command failed %d\n",
+ rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter,
+ p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0],
+ p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2],
+ p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4],
+ p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates a CRC32c over a buffer
+ * Note: crc32_length is in bytes and MUST be a multiple of 8
+ * Return:
+ * The resulting CRC; on invalid input the seed is returned unchanged
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length,
+ u32 crc32_seed,
+ u8 complement)
+{
+ u32 byte = 0;
+ u32 bit = 0;
+ u8 msb = 0;
+ u8 current_byte = 0;
+ u32 crc32_result = crc32_seed;
+
+ if ((!crc32_packet) ||
+ (crc32_length == 0) ||
+ ((crc32_length % 8) != 0))
+ return crc32_result;
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1; /*crc32_result[0] = 1;*/
+ }
+ }
+ }
+ return crc32_result;
+}
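+
+/* Note: CRC32_POLY (0x1edc6f41) is the CRC32c (Castagnoli) generator
+ * polynomial; qed_crc32c_le() below zero-pads the 6-byte MAC into an
+ * 8-byte buffer to satisfy the multiple-of-8 length requirement.
+ */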
+
+static inline u32 qed_crc32c_le(u32 seed,
+ u8 *mac,
+ u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+static u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
+ return crc & 0xff;
+}
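+
+/* Usage sketch (mirrors qed_sp_eth_filter_mcast() below): each multicast
+ * MAC is hashed into one of 256 approximate-match bins, the bin index
+ * being the low byte of the CRC32c over the zero-padded MAC:
+ *
+ *   u32 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ *   __set_bit(bit, bins);
+ */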
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc, i;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ } else {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ }
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+ memset(bins, 0, sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ /* the filter ADD op is an explicit set op and it removes
+ * any existing filters for the vport
+ */
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, bins);
+ }
+
+ /* Convert to correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)bins;
+ struct vport_update_ramrod_mcast *approx_mcast;
+
+ approx_mcast = &p_ramrod->approx_mcast;
+ approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multicast */
+ if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+ (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+ return -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_mcast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+ return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+
+ return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info)
+{
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues += FEAT_NUM(&cdev->hwfns[i],
+ QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues = min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_start(p_hwfn,
+ p_hwfn->hw_info.concrete_fid,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id,
+ mtu,
+ drop_ttl0_flg,
+ inner_vlan_removal_en_flg);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ qed_hw_start_fastpath(p_hwfn);
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started V-PORT %d with MTU %d\n",
+ vport_id, mtu);
+ }
+
+ qed_reset_vport_stats(cdev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+ u8 vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+ struct qed_update_vport_params *params)
+{
+ struct qed_sp_vport_update_params sp_params;
+ struct qed_rss_params sp_rss_params;
+ int rc, i;
+
+ if (!cdev)
+ return -ENODEV;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg =
+ params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg =
+ params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+
+ /* RSS is a bit tricky: the upper layer isn't aware of hwfns, so for
+ * CMT devices the RSS indirection values need to be re-mapped per
+ * engine.
+ */
+ if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+ struct qed_update_vport_rss_params *rss =
+ ¶ms->rss_params;
+ int k, max = 0;
+
+ /* Find largest entry, since it's possible RSS needs to
+ * be disabled [in case only 1 queue per-hwfn]
+ */
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ max = (max > rss->rss_ind_table[k]) ?
+ max : rss->rss_ind_table[k];
+
+ /* Either fix RSS values or disable RSS */
+ if (cdev->num_hwfns < max + 1) {
+ int divisor = (max + cdev->num_hwfns - 1) /
+ cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - fixing RSS values (modulo %02x)\n",
+ divisor);
+
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ rss->rss_ind_table[k] =
+ rss->rss_ind_table[k] % divisor;
+ } else {
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ params->update_rss_flg = 0;
+ }
+ }
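+
+ /* Worked example of the fix-up above: with 2 hwfns and a largest
+ * indirection entry of 7, divisor = (7 + 2 - 1) / 2 = 4, so
+ * entries 0..7 fold into 0..3 - the queue range each engine
+ * actually owns.
+ */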
+
+ /* Now build the actual RSS configuration to be applied */
+ if (params->update_rss_flg) {
+ sp_rss_params.update_rss_config = 1;
+ sp_rss_params.rss_enable = 1;
+ sp_rss_params.update_rss_capabilities = 1;
+ sp_rss_params.update_rss_ind_table = 1;
+ sp_rss_params.update_rss_key = 1;
+ sp_rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+ memcpy(sp_rss_params.rss_ind_table,
+ params->rss_params.rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+ memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+ QED_RSS_KEY_SIZE * sizeof(u32));
+ }
+ sp_params.rss_params = &sp_rss_params;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = qed_sp_vport_update(p_hwfn, &sp_params,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_ERR(cdev, "Failed to update VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+ return 0;
+}
+
+static int qed_start_rxq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ pp_prod);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ params->queue_id, params->rss_id, params->vport_id,
+ params->sb);
+
+ return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+ struct qed_stop_rxq_params *params)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ params->rx_queue_id / cdev->num_hwfns,
+ params->eq_completion_only,
+ false);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = p_params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ p_params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ pbl_addr,
+ pbl_size,
+ pp_doorbell);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, p_params->rss_id, p_params->vport_id,
+ p_params->sb);
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+ qed_hw_stop_fastpath(cdev);
+
+ return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+ struct qed_stop_txq_params *params)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ params->tx_queue_id / cdev->num_hwfns);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qed_filter_accept_flags accept_flags;
+
+ memset(&accept_flags, 0, sizeof(accept_flags));
+
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+ accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+ else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+ return qed_filter_accept_cmd(cdev, 0, accept_flags,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params)
+{
+ struct qed_filter_ucast ucast;
+
+ if (!params->vlan_valid && !params->mac_valid) {
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
+ return -EINVAL;
+ }
+
+ memset(&ucast, 0, sizeof(ucast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ ucast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ ucast.opcode = QED_FILTER_REMOVE;
+ break;
+ case QED_FILTER_XCAST_TYPE_REPLACE:
+ ucast.opcode = QED_FILTER_REPLACE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+ params->type);
+ return -EINVAL;
+ }
+
+ if (params->vlan_valid && params->mac_valid) {
+ ucast.type = QED_FILTER_MAC_VLAN;
+ ether_addr_copy(ucast.mac, params->mac);
+ ucast.vlan = params->vlan;
+ } else if (params->mac_valid) {
+ ucast.type = QED_FILTER_MAC;
+ ether_addr_copy(ucast.mac, params->mac);
+ } else {
+ ucast.type = QED_FILTER_VLAN;
+ ucast.vlan = params->vlan;
+ }
+
+ ucast.is_rx_filter = true;
+ ucast.is_tx_filter = true;
+
+ return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params)
+{
+ struct qed_filter_mcast mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ mcast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ mcast.opcode = QED_FILTER_REMOVE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+ params->type);
+ return -EINVAL;
+ }
+
+ mcast.num_mc_addrs = params->num;
+ for (i = 0; i < mcast.num_mc_addrs; i++)
+ ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+ return qed_filter_mcast_cmd(cdev, &mcast,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+ struct qed_filter_params *params)
+{
+ enum qed_filter_rx_mode_type accept_flags;
+
+ switch (params->type) {
+ case QED_FILTER_TYPE_UCAST:
+ return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+ case QED_FILTER_TYPE_MCAST:
+ return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+ case QED_FILTER_TYPE_RX_MODE:
+ accept_flags = params->filter.accept_flags;
+ return qed_configure_filter_rx_mode(cdev, accept_flags);
+ default:
+ DP_NOTICE(cdev, "Unknown filter type %d\n",
+ (int)params->type);
+ return -EINVAL;
+ }
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+ u8 rss_id,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+ cqe);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_eth_dev_info,
+ .register_ops = &qed_register_eth_ops,
+ .vport_start = &qed_start_vport,
+ .vport_stop = &qed_stop_vport,
+ .vport_update = &qed_update_vport,
+ .q_rx_start = &qed_start_rxq,
+ .q_rx_stop = &qed_stop_rxq,
+ .q_tx_start = &qed_start_txq,
+ .q_tx_stop = &qed_stop_txq,
+ .filter_config = &qed_configure_filter,
+ .fastpath_stop = &qed_fastpath_stop,
+ .eth_cqe_completion = &qed_fp_cqe_completion,
+ .get_vport_stats = &qed_get_vport_stats,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+{
+ if (version != QED_ETH_INTERFACE_VERSION) {
+ pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
+ version, QED_ETH_INTERFACE_VERSION);
+ return NULL;
+ }
+
+ return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+ /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644
index 0000000..947c7af
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -0,0 +1,1169 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+
+static const char version[] =
+ "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION \
+ __stringify(FW_MAJOR_VERSION) "." \
+ __stringify(FW_MINOR_VERSION) "." \
+ __stringify(FW_REVISION_VERSION) "." \
+ __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME \
+ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+static int __init qed_init(void)
+{
+ pr_notice("qed_init called\n");
+
+ pr_info("%s", version);
+
+ return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+ pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+*/
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+ struct device *dev = &cdev->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ DP_NOTICE(cdev,
+ "Can't request 64-bit consistent allocations\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+ struct pci_dev *pdev = cdev->pdev;
+
+ if (cdev->doorbells)
+ iounmap(cdev->doorbells);
+ if (cdev->regview)
+ iounmap(cdev->regview);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* Performs PCI initializations as well as initializing PCI-related parameters
+ * in the device structure. Returns 0 in case of success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+ struct pci_dev *pdev)
+{
+ int rc;
+
+ cdev->pdev = pdev;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Cannot enable PCI device\n");
+ goto err0;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #0\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #2\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, "qed");
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to request PCI memory resources\n");
+ goto err1;
+ }
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ DP_NOTICE(cdev, "The bus is not PCI Express\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (cdev->pci_params.pm_cap == 0)
+ DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+ rc = qed_set_coherency_mask(cdev);
+ if (rc)
+ goto err2;
+
+ cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+ cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+ cdev->pci_params.irq = pdev->irq;
+
+ cdev->regview = pci_ioremap_bar(pdev, 0);
+ if (!cdev->regview) {
+ DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+
+err2:
+ pci_release_regions(pdev);
+err1:
+ pci_disable_device(pdev);
+err0:
+ return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info)
+{
+ struct qed_ptt *ptt;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ dev_info->num_hwfns = cdev->num_hwfns;
+ dev_info->pci_mem_start = cdev->pci_params.mem_start;
+ dev_info->pci_mem_end = cdev->pci_params.mem_end;
+ dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+ ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+
+ qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+
+ return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+ kfree(cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+ struct qed_dev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return cdev;
+
+ qed_init_struct(cdev);
+
+ return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+ pci_power_t state)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+ return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+ enum qed_protocol protocol,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct qed_dev *cdev;
+ int rc;
+
+ cdev = qed_alloc_cdev(pdev);
+ if (!cdev)
+ goto err0;
+
+ cdev->protocol = protocol;
+
+ qed_init_dp(cdev, dp_module, dp_level);
+
+ rc = qed_init_pci(cdev, pdev);
+ if (rc) {
+ DP_ERR(cdev, "init pci failed\n");
+ goto err1;
+ }
+ DP_INFO(cdev, "PCI init completed successfully\n");
+
+ rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+ if (rc) {
+ DP_ERR(cdev, "hw prepare failed\n");
+ goto err2;
+ }
+
+ DP_INFO(cdev, "qed_probe completed successffuly\n");
+
+ return cdev;
+
+err2:
+ qed_free_pci(cdev);
+err1:
+ qed_free_cdev(cdev);
+err0:
+ return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return;
+
+ qed_hw_remove(cdev);
+
+ qed_free_pci(cdev);
+
+ qed_set_power_state(cdev, PCI_D3hot);
+
+ qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ pci_disable_msix(cdev->pdev);
+ kfree(cdev->int_params.msix_table);
+ } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+ pci_disable_msi(cdev->pdev);
+ }
+
+ memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+ struct qed_int_params *int_params)
+{
+ int i, rc, cnt;
+
+ cnt = int_params->in.num_vectors;
+
+ for (i = 0; i < cnt; i++)
+ int_params->msix_table[i].entry = i;
+
+ rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+ int_params->in.min_msix_cnt, cnt);
+ if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+ (rc % cdev->num_hwfns)) {
+ pci_disable_msix(cdev->pdev);
+
+ /* If fastpath is initialized, we need at least one interrupt
+ * per hwfn [and the slow path interrupts]. New requested number
+ * should be a multiple of the number of hwfns.
+ */
+ cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+ DP_NOTICE(cdev,
+ "Trying to enable MSI-X with less vectors (%d out of %d)\n",
+ cnt, int_params->in.num_vectors);
+ rc = pci_enable_msix_exact(cdev->pdev,
+ int_params->msix_table, cnt);
+ if (!rc)
+ rc = cnt;
+ }
+
+ if (rc > 0) {
+ /* MSI-x configuration was achieved */
+ int_params->out.int_mode = QED_INT_MODE_MSIX;
+ int_params->out.num_vectors = rc;
+ rc = 0;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+ cnt, rc);
+ }
+
+ return rc;
+}
+
+/* This function outputs the interrupt mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+ struct qed_int_params *int_params = &cdev->int_params;
+ struct msix_entry *tbl;
+ int rc = 0, cnt;
+
+ switch (int_params->in.int_mode) {
+ case QED_INT_MODE_MSIX:
+ /* Allocate MSIX table */
+ cnt = int_params->in.num_vectors;
+ int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+ if (!int_params->msix_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Enable MSIX */
+ rc = qed_enable_msix(cdev, int_params);
+ if (!rc)
+ goto out;
+
+ DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+ kfree(int_params->msix_table);
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_MSI:
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
+
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_INTA:
+ int_params->out.int_mode = QED_INT_MODE_INTA;
+ rc = 0;
+ goto out;
+ default:
+ DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+ int_params->in.int_mode);
+ rc = -EINVAL;
+ }
+
+out:
+ cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+ return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+ int index, void(*handler)(void *))
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ hwfn->simd_proto_handler[relative_idx].func = handler;
+ hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ memset(&hwfn->simd_proto_handler[relative_idx], 0,
+ sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+ struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+ struct qed_hwfn *hwfn;
+ irqreturn_t rc = IRQ_NONE;
+ u64 status;
+ int i, j;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+ if (!status)
+ continue;
+
+ hwfn = &cdev->hwfns[i];
+
+ /* Slowpath interrupt */
+ if (unlikely(status & 0x1)) {
+ tasklet_schedule(hwfn->sp_dpc);
+ status &= ~0x1;
+ rc = IRQ_HANDLED;
+ }
+
+ /* Fastpath interrupts */
+ for (j = 0; j < 64; j++) {
+ if ((0x2ULL << j) & status) {
+ hwfn->simd_proto_handler[j].func(
+ hwfn->simd_proto_handler[j].token);
+ status &= ~(0x2ULL << j);
+ rc = IRQ_HANDLED;
+ }
+ }
+
+ if (unlikely(status))
+ DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+ "got an unknown interrupt status 0x%llx\n",
+ status);
+ }
+
+ return rc;
+}
+
+static int qed_slowpath_irq_req(struct qed_dev *cdev)
+{
+ int i = 0, rc = 0;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ /* Request all the slowpath MSI-X vectors */
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ snprintf(cdev->hwfns[i].name, NAME_SIZE,
+ "sp-%d-%02x:%02x.%02x",
+ i, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ cdev->hwfns[i].abs_pf_id);
+
+ rc = request_irq(cdev->int_params.msix_table[i].vector,
+ qed_msix_sp_int, 0,
+ cdev->hwfns[i].name,
+ cdev->hwfns[i].sp_dpc);
+ if (rc)
+ break;
+
+ DP_VERBOSE(&cdev->hwfns[i],
+ (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath MSI-X\n");
+ }
+
+ if (i != cdev->num_hwfns) {
+ /* Free the already-requested MSI-X vectors */
+ for (i--; i >= 0; i--) {
+ unsigned int vec =
+ cdev->int_params.msix_table[i].vector;
+ synchronize_irq(vec);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ }
+ } else {
+ unsigned long flags = 0;
+
+ snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+ cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+ PCI_FUNC(cdev->pdev->devfn));
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+ flags |= IRQF_SHARED;
+
+ rc = request_irq(cdev->pdev->irq, qed_single_int,
+ flags, cdev->name, cdev);
+ }
+
+ return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+ int i;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i) {
+ synchronize_irq(cdev->int_params.msix_table[i].vector);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ } else {
+ free_irq(cdev->pdev->irq, cdev);
+ }
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+ int i, rc;
+
+ rc = qed_hw_stop(cdev);
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ p_hwfn->b_sp_dpc_enabled = false;
+ DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+ "Disabled sp taskelt [hwfn %d] at %p\n",
+ i, p_hwfn->sp_dpc);
+ }
+ }
+
+ return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_hw_reset(cdev);
+ if (rc)
+ return rc;
+
+ qed_resc_free(cdev);
+
+ return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_resc_alloc(cdev);
+ if (rc)
+ return rc;
+
+ DP_INFO(cdev, "Allocated qed resources\n");
+
+ qed_resc_setup(cdev);
+
+ return rc;
+}
+
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
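+ /* In MSI/INTA mode all fastpath interrupts funnel through a single
+ * IRQ which qed_single_int() demultiplexes; bit 0 of the SISR status
+ * is the slowpath, which presumably motivates the 63-per-hwfn cap.
+ */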
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+ limit = cdev->num_hwfns * 63;
+ else if (cdev->int_params.fp_msix_cnt)
+ limit = cdev->int_params.fp_msix_cnt;
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(struct qed_int_info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ /* Need to expose only MSI-X information; Single IRQ is handled solely
+ * by qed.
+ */
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.fp_msix_base;
+
+ info->msix_cnt = cdev->int_params.fp_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+ }
+
+ return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+ enum qed_int_mode int_mode)
+{
+ int rc, i;
+ u8 num_vectors = 0;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+
+ cdev->int_params.in.int_mode = int_mode;
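+ /* For each hwfn: one vector per status block, plus one slowpath vector */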
+ for_each_hwfn(cdev, i)
+ num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
+ cdev->int_params.in.num_vectors = num_vectors;
+
+ /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+ rc = qed_set_int_mode(cdev, false);
+ if (rc) {
+ DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ return rc;
+ }
+
+ cdev->int_params.fp_msix_base = cdev->num_hwfns;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+ cdev->num_hwfns;
+
+ return 0;
+}
+
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+ zlib_inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+ p_hwfn->stream->msg, rc);
+ return 0;
+ }
+
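+ /* total_out is in bytes; return the unzipped length in dwords */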
+ return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+ void *workspace;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+
+ workspace = vzalloc(zlib_inflate_workspacesize());
+ if (!workspace)
+ return -ENOMEM;
+ p_hwfn->stream->workspace = workspace;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ vfree(p_hwfn->stream->workspace);
+ kfree(p_hwfn->stream);
+ }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+ struct qed_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static int qed_slowpath_start(struct qed_dev *cdev,
+ struct qed_slowpath_params *params)
+{
+ struct qed_mcp_drv_version drv_version;
+ const u8 *data = NULL;
+ struct qed_hwfn *hwfn;
+ int rc;
+
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
+
+ rc = qed_nic_setup(cdev);
+ if (rc)
+ goto err;
+
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (rc)
+ goto err1;
+
+ /* Request the slowpath IRQ */
+ rc = qed_slowpath_irq_req(cdev);
+ if (rc)
+ goto err2;
+
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err3;
+ }
+
+ /* Start the slowpath */
+ data = cdev->firmware->data;
+
+ rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ true, data);
+ if (rc)
+ goto err3;
+
+ DP_INFO(cdev,
+ "HW initialization and function start completed successfully\n");
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
+
+ return 0;
+
+err3:
+ qed_free_stream_mem(cdev);
+ qed_slowpath_irq_free(cdev);
+err2:
+ qed_disable_msix(cdev);
+err1:
+ qed_resc_free(cdev);
+err:
+ release_firmware(cdev->firmware);
+
+ return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ qed_free_stream_mem(cdev);
+
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+
+ qed_disable_msix(cdev);
+ qed_nic_reset(cdev);
+
+ release_firmware(cdev->firmware);
+
+ return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+ char ver_str[VER_SIZE])
+{
+ int i;
+
+ memcpy(cdev->name, name, NAME_SIZE);
+ for_each_hwfn(cdev, i)
+ snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+ memcpy(cdev->ver_str, ver_str, VER_SIZE);
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id,
+ enum qed_sb_type type)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u8 n_hwfns;
+ u32 rc;
+
+ /* L2 queues are spread across all engines in CMT (100G) mode, while
+ * RoCE and storage are confined to engine 0.
+ */
+ if (type == QED_SB_TYPE_L2_QUEUE)
+ n_hwfns = cdev->num_hwfns;
+ else
+ n_hwfns = 1;
+
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u32 rc;
+
+ hwfn_index = sb_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+ return rc;
+}
+
+static int qed_set_link(struct qed_dev *cdev,
+ struct qed_link_params *params)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_mcp_link_params *link_params;
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (!cdev)
+ return -ENODEV;
+
+ /* The link should be set only once per PF */
+ hwfn = &cdev->hwfns[0];
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = qed_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+ link_params->speed.advertised_speeds = 0;
+ if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+ (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
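+ /* Ethtool defines no SUPPORTED_* bits for 50G/100G yet; the
+ * always-false masks below are apparently placeholders until it does.
+ */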
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+ link_params->speed.forced_speed = params->forced_speed;
+
+ rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+ int port_type;
+
+ switch (media_type) {
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_KR:
+ port_type = PORT_FIBRE;
+ break;
+ case MEDIA_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case MEDIA_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case MEDIA_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case MEDIA_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_link_output *if_link)
+{
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_mcp_link_capabilities link_caps;
+ u32 media_type;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ /* TODO - at the moment assume supported and advertised speed equal */
+ if_link->supported_caps = SUPPORTED_FIBRE;
+ if (params.speed.autoneg)
+ if_link->supported_caps |= SUPPORTED_Autoneg;
+ if (params.pause.autoneg ||
+ (params.pause.forced_rx && params.pause.forced_tx))
+ if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= SUPPORTED_Pause;
+
+ if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
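+ /* The |= 0 statements below are placeholders for 50G/100G, for which
+ * no ethtool SUPPORTED_* bits exist yet.
+ */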
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= 0;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->advertised_caps |= 0;
+
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= 0;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->supported_caps |= 0;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ /* TODO - fill duplex properly */
+ if_link->duplex = DUPLEX_FULL;
+ qed_mcp_get_media_type(hwfn->cdev, &media_type);
+ if_link->port = qed_get_port_type(media_type);
+
+ if_link->autoneg = params.speed.autoneg;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ /* Link partner capabilities */
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= 0;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= 0;
+
+ if (link.an_complete)
+ if_link->lp_caps |= SUPPORTED_Autoneg;
+
+ if (link.partner_adv_pause)
+ if_link->lp_caps |= SUPPORTED_Pause;
+ if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+ link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+ if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+ struct qed_link_output *if_link)
+{
+ qed_fill_link(&cdev->hwfns[0], if_link);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, &if_link);
+
+ if (IS_LEAD_HWFN(hwfn) && cookie)
+ op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i, rc;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_drain(hwfn, ptt);
+ if (rc)
+ return rc;
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ return 0;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ .probe = &qed_probe,
+ .remove = &qed_remove,
+ .set_power_state = &qed_set_power_state,
+ .set_id = &qed_set_id,
+ .update_pf_params = &qed_update_pf_params,
+ .slowpath_start = &qed_slowpath_start,
+ .slowpath_stop = &qed_slowpath_stop,
+ .set_fp_int = &qed_set_int_fp,
+ .get_fp_int = &qed_get_int_fp,
+ .sb_init = &qed_sb_init,
+ .sb_release = &qed_sb_release,
+ .simd_handler_config = &qed_simd_handler_config,
+ .simd_handler_clean = &qed_simd_handler_clean,
+ .set_link = &qed_set_link,
+ .get_link = &qed_get_current_link,
+ .drain = &qed_drain,
+ .update_msglvl = &qed_init_dp,
+ .chain_alloc = &qed_chain_alloc,
+ .chain_free = &qed_chain_free,
+};
+
+u32 qed_get_protocol_version(enum qed_protocol protocol)
+{
+ switch (protocol) {
+ case QED_PROTOCOL_ETH:
+ return QED_ETH_INTERFACE_VERSION;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644
index 0000000..20d048c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field))
+
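+/* FW version packed into the PDA-compatibility field of the LOAD_REQ
+ * parameter sent to the MFW.
+ */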
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
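+/* 2^17 bytes = 128 KiB = 1 Mbit; converts the NVM flash-size field,
+ * given in Mbit units, into bytes.
+ */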
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ u32 tmp, i;
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
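+ /* The first dword of the MFW mailbox holds its length (read at init
+ * time); the message dwords follow, hence the sizeof(u32) offset.
+ */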
+ for (i = 0; i < length; i++) {
+ tmp = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ /* The MB data is actually BE; Need to force it to cpu */
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ be32_to_cpu((__force __be32)tmp);
+ }
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ kfree(p_hwfn->mcp_info->mfw_mb_cur);
+ kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+ }
+ kfree(p_hwfn->mcp_info);
+
+ return 0;
+}
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+ p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return 0;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+ DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since public_base indicates that
+ * the MCP is not initialized
+ */
+ return 0;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+ p_info->mfw_mb_shadow =
+ kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
+ p_info->mfw_mb_length), GFP_ATOMIC);
+ if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ goto err;
+
+ /* Initialize the MFW mutex */
+ mutex_init(&p_info->mutex);
+
+ return 0;
+
+err:
+ DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+ qed_mcp_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 org_mcp_reset_seq, cnt = 0;
+ int rc = 0;
+
+ /* Set drv command along with the updated sequence */
+ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+ (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ /* Give the FW up to 500 msec (50*1000*10usec) */
+ } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < QED_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = -EAGAIN;
+ }
+
+ return rc;
+}
+
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 seq, cnt = 1, actual_mb_seq;
+ int rc = 0;
+
+ /* Get actual driver mailbox sequence */
+ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Use MCP history register to check if MCP reset occurred between
+ * init time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+ /* Set drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+ /* Set drv command along with the updated sequence */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "wrote command (%x) to MFW MB param 0x%08x\n",
+ (cmd | seq), param);
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*1000*10usec) */
+ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+ (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt * delay, *o_mcp_resp, seq);
+
+ /* Is this a reply to our command? */
+ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+ *o_mcp_resp &= FW_MSG_CODE_MASK;
+ /* Get the MCP param */
+ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+ } else {
+ /* FW BUG! */
+ DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ *o_mcp_resp = 0;
+ rc = -EAGAIN;
+ }
+ return rc;
+}
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ int rc = 0;
+
+ /* MCP not initialized */
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Lock Mutex to ensure only single thread is
+ * accessing the MCP at one time
+ */
+ mutex_lock(&p_hwfn->mcp_info->mutex);
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param);
+ /* Release Mutex */
+ mutex_unlock(&p_hwfn->mcp_info->mutex);
+
+ return rc;
+}
+
+static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 i;
+
+ /* Copy version string to MCP */
+ for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
+ *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 param;
+ int rc;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Save driver's version to shmem */
+ qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Load Request */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+ (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type),
+ p_load_code, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* If the MFW refused the load request we must abort. This can happen
+ * in the following cases:
+ * - The other port is in diagnostic mode.
+ * - A previously loaded function on the engine is not compliant with
+ * the requester.
+ * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+ */
+ if (!(*p_load_code) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+ DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_reset)
+{
+ struct qed_mcp_link_state *p_link;
+ u32 status = 0;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ memset(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+ "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+ status,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link indications\n");
+ return;
+ }
+
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* Correct speed according to bandwidth allocation */
+ if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+ p_link->speed = p_link->speed *
+ p_hwfn->mcp_info->func_info.bandwidth_max /
+ 100;
+ qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_link->speed);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+ }
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up)
+{
+ struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ u32 param = 0, reply = 0, cmd;
+ struct pmm_phy_cfg phy_cfg;
+ int rc = 0;
+ u32 i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Set the shmem configuration according to params */
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* Write the requested configuration to shmem */
+ for (i = 0; i < sizeof(phy_cfg); i += 4)
+ qed_wr(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data) + i,
+ ((u32 *)&phy_cfg)[i >> 2]);
+
+ if (b_up) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link\n");
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Send the PHY configuration (or link-reset) command */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Reset the link status if needed */
+ if (!b_up)
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+ return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *info = p_hwfn->mcp_info;
+ int rc = 0;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ qed_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = -EINVAL;
+ }
+ }
+
+ /* ACK everything */
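+ /* The ACK dwords sit right after the current-message dwords in shmem */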
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* The MFW expects the answer in BE, so force the write into that format */
+ qed_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32),
+ (__force u32)val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Received an MFW message indication but no new message!\n");
+ rc = -EINVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *p_mfw_ver)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+ u32 global_offsize;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ global_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global, mfw_ver));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *p_media_type)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, media_type));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+ struct public_func *p_info,
+ enum qed_pci_personality *p_proto)
+{
+ int rc = 0;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ *p_proto = QED_PCI_ETH;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return -EINVAL;
+ }
+
+ if (p_hwfn->cdev->mf_mode != SF) {
+ info->bandwidth_min = (shmem_info.config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ info->bandwidth_min);
+ info->bandwidth_min = 1;
+ }
+
+ info->bandwidth_max = (shmem_info.config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ info->bandwidth_max);
+ info->bandwidth_max = 100;
+ }
+ }
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+ }
+
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ info->wwn_port, info->wwn_node, info->ovlan);
+
+ return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 100,
+ &resp, ¶m);
+
+ /* Wait for the drain to complete before returning */
+ msleep(120);
+
+ return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
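+ /* NVM_CFG4 encodes the flash size as a power-of-two number of Mbits */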
+ flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return 0;
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver)
+{
+ int rc = 0;
+ u32 param = 0, reply = 0, i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
+ p_ver->version);
+ /* Copy version string to shmem */
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
+ DRV_MB_WR(p_hwfn, p_ptt,
+ union_data.drv_version.name[i * sizeof(u32)],
+ *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
+ &param);
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644
index 0000000..dbaae58
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -0,0 +1,369 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+struct qed_mcp_link_params {
+ struct qed_mcp_link_speed_params speed;
+ struct qed_mcp_link_pause_params pause;
+ u32 loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+ u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+ bool link_up;
+
+ u32 speed; /* In Mb/s */
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+ u8 pause_on_host;
+
+ enum qed_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define QED_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+};
+
+struct qed_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct qed_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+ *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param cdev - qed dev pointer
+ * @param mfw_ver - mfw version value
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *mfw_ver);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param cdev - qed dev pointer
+ * @param media_type - media type value
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ * mailbox. It acquires the mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response arrives. The response is polled every 5ms,
+ * for up to 5 seconds.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP.
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (exclude sequence).
+ * @param o_mcp_param- Optional parameter provided by the MCP
+ * response
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
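+
+/* Usage sketch - this mirrors qed_mcp_drain() in qed_mcp.c:
+ *
+ *   u32 resp = 0, param = 0;
+ *   int rc;
+ *
+ *   rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
+ *                    &resp, &param);
+ */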
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_ver - driver version value and protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver);
+
+/* Using the hwfn number (and not pf_num) is required since in CMT mode
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
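+/* Example: on a BB device, rel_pfid 2 on an hwfn whose abs_pf_id is 3
+ * maps to 2 | (1 << 3) = 10; on non-BB devices the relative id is used
+ * as-is.
+ */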
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+ struct mutex mutex; /* MCP access lock */
+ u32 public_base;
+ u32 drv_mb_addr;
+ u32 mfw_mb_addr;
+ u32 port_addr;
+ u16 drv_mb_seq;
+ u16 drv_pulse_seq;
+ struct qed_mcp_link_params link_input;
+ struct qed_mcp_link_state link_output;
+ struct qed_mcp_link_capabilities link_capabilities;
+ struct qed_mcp_function_info func_info;
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ *
+ * @return int
+ */
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW and, in case the operation
+ * succeeds, returns whether this PF is the first on the
+ * chip/engine/port or function. This function should be
+ * called when the driver is ready to accept MFW events after
+ * Storm initializations are done.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_load_code - The MCP response param containing one
+ * of the following:
+ * FW_MSG_CODE_DRV_LOAD_ENGINE
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - called during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644
index 0000000..7a5ce59
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -0,0 +1,366 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xff << 24)
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_FCOE \
+ 0x1f0408UL
+#define PRS_REG_SEARCH_ROCE \
+ 0x1f040cUL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644
index 0000000..31a1f1e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -0,0 +1,360 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+ QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ QED_SPQ_MODE_CB, /* Client supplies a callback */
+ QED_SPQ_MODE_EBLOCK, /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+ void (*function)(struct qed_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
+};
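+
+/* A minimal QED_SPQ_MODE_CB callback sketch; the names below are
+ * illustrative only, not part of the driver:
+ *
+ *   static void my_ramrod_done(struct qed_hwfn *p_hwfn, void *cookie,
+ *                              union event_ring_data *data,
+ *                              u8 fw_return_code)
+ *   {
+ *           struct my_ctx *ctx = cookie;
+ *
+ *           ctx->fw_ret = fw_return_code;
+ *   }
+ */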
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ * @file
+ *
+ * QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct vport_update_ramrod_data vport_update;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ QED_SPQ_PRIORITY_NORMAL,
+ QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+ struct qed_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+struct qed_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct qed_spq_entry {
+ struct list_head list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ struct list_head *queue;
+
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb comp_cb;
+ struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+ struct qed_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct qed_consq {
+ struct qed_chain chain;
+};
+
+struct qed_spq {
+ spinlock_t lock; /* SPQ lock */
+
+ struct list_head unlimited_pending;
+ struct list_head pending;
+ struct list_head completion_pending;
+ struct list_head free_pool;
+
+ struct qed_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct qed_spq_entry *p_virt;
+
+ /* Used as index for completions (returns on EQ by FW) */
+ u16 echo_idx;
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+};
+
+/**
+ * @brief qed_spq_post - Posts a slowpath request to the FW or, if
+ * there is no room on the chain, pends it on the pending list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ * free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes a ConsQ
+ * struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_consq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ * state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION 0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_request_params {
+ size_t ramrod_data_size;
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb *p_comp_data;
+};
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params);
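+
+/* Typical flow, as in qed_sp_pf_stop() in qed_sp_commands.c:
+ *
+ *   struct qed_sp_init_request_params params;
+ *   struct qed_spq_entry *p_ent;
+ *
+ *   memset(&params, 0, sizeof(params));
+ *   params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ *   rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+ *                            p_hwfn->hw_info.opaque_fid,
+ *                            COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ *                            &params);
+ *   if (!rc)
+ *           rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ */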
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param mode
+ *
+ * @return int
+ */
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644
index 0000000..6f78791
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -0,0 +1,170 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params)
+{
+ int rc = -EINVAL;
+ struct qed_spq_entry *p_ent = NULL;
+ u32 opaque_cid = opaque_fid << 16 | cid;
+
+ if (!pp_ent)
+ return -ENOMEM;
+
+ rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+ if (rc != 0)
+ return rc;
+
+ p_ent = *pp_ent;
+
+ p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+
+ p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_params->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case QED_SPQ_MODE_BLOCK:
+ if (!p_params->p_comp_data)
+ return -EINVAL;
+
+ p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+ break;
+
+ case QED_SPQ_MODE_CB:
+ if (!p_params->p_comp_data)
+ p_ent->comp_cb.function = NULL;
+ else
+ p_ent->comp_cb = *p_params->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+ if (p_params->ramrod_data_size)
+ memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+ return 0;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode)
+{
+ struct qed_sp_init_request_params params;
+ struct pf_start_ramrod_data *p_ramrod = NULL;
+ u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ /* update initial eq producer */
+ qed_eq_prod_update(p_hwfn,
+ qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn,
+ &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.pf_start;
+
+ p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = QED_PATH_ID(p_hwfn);
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = cpu_to_le16(0xf);
+ p_ramrod->mf_mode = mode;
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ /* Place EQ address in RAMROD */
+ p_ramrod->event_ring_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
+
+ p_ramrod->consolid_q_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_ramrod->consolid_q_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+ p_hwfn->hw_info.personality = PERSONALITY_ETH;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+ sb, sb_index,
+ (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
+ p_ramrod->outer_tag);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sp_init_request_params params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&params, 0, sizeof(params));
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644
index 0000000..7c0b845
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+#define SPQ_BLOCK_SLEEP_LENGTH (1000)
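+
+/* qed_spq_block() below polls up to SPQ_BLOCK_SLEEP_LENGTH times,
+ * sleeping 5-10ms per iteration, i.e. a 5-10 second timeout per
+ * attempt (once before the MCP drain and once after it).
+ */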
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_spq_comp_done *comp_done;
+
+ comp_done = (struct qed_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret)
+{
+ int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ struct qed_spq_comp_done *comp_done;
+ int rc;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != 0)
+ DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+ /* Retry after drain */
+ sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+
+ DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+ return -EBUSY;
+}
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ p_ent->elem.hdr.echo = 0;
+ p_hwfn->p_spq->echo_idx++;
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ case QED_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = qed_spq_blocking_cb;
+ break;
+ case QED_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid,
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi,
+ p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq)
+{
+ u16 pq;
+ struct qed_cxt_info cxt_info;
+ struct core_conn_context *p_cxt;
+ union qed_qm_pq_params pq_params;
+ int rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+ /* QM physical queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ p_cxt->xstorm_st_context.consolid_base_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.consolid_base_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq,
+ struct qed_spq_entry *p_ent)
+{
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct slow_path_element *elem;
+ struct core_db_data db;
+
+ elem = qed_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+ return -EINVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ memset(&db, 0, sizeof(db));
+ SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* validate producer is up to date */
+ rmb();
+
+ db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+ /* do not reorder */
+ barrier();
+
+ DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+ /* make sure the doorbell is rung */
+ mmiowb();
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+ qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+ p_spq->cid, db.params, db.agg_flags,
+ qed_chain_get_prod_idx(p_chain));
+
+ return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie)
+
+{
+ struct qed_eq *p_eq = cookie;
+ struct qed_chain *p_chain = &p_eq->chain;
+ int rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+ /* Need to guarantee the fw_cons index we use points to a usable
+ * element (to comply with our chain), so our macros would comply
+ */
+ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+ qed_chain_get_usable_per_page(p_chain))
+ fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+ if (!p_eqe) {
+ rc = -EINVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode,
+ p_eqe->protocol_id,
+ p_eqe->reserved0,
+ le16_to_cpu(p_eqe->echo),
+ p_eqe->fw_return_code,
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (qed_async_event_completion(p_hwfn, p_eqe))
+ rc = -EINVAL;
+ } else if (qed_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = -EINVAL;
+ }
+
+ qed_chain_recycle_consumed(p_chain);
+ }
+
+ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem)
+{
+ struct qed_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ num_elem,
+ sizeof(union event_ring_element),
+ &p_eq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ qed_int_register_cb(p_hwfn,
+ qed_eq_completion,
+ p_eq,
+ &p_eq->eq_sb_index,
+ &p_eq->p_fw_cons);
+
+ return p_eq;
+
+eq_allocate_fail:
+ qed_eq_free(p_hwfn, p_eq);
+ return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ if (!p_eq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+ kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+ struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
+{
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ int rc;
+
+ rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+
+ return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ unsigned int i = 0;
+
+ INIT_LIST_HEAD(&p_spq->pending);
+ INIT_LIST_HEAD(&p_spq->completion_pending);
+ INIT_LIST_HEAD(&p_spq->free_pool);
+ INIT_LIST_HEAD(&p_spq->unlimited_pending);
+ spin_lock_init(&p_spq->lock);
+
+ /* SPQ empty pool */
+ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ for (i = 0; i < p_spq->chain.capacity; i++) {
+ p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+ p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+ list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct qed_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+ p_spq->echo_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ qed_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ struct qed_spq_entry *p_virt = NULL;
+
+ /* SPQ struct */
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ return -ENOMEM;
+ }
+
+ /* SPQ ring */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_SINGLE,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ &p_phys,
+ GFP_KERNEL);
+
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+ p_hwfn->p_spq = p_spq;
+
+ return 0;
+
+spq_allocate_fail:
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+ return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (!p_spq)
+ return;
+
+ if (p_spq->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ p_spq->p_virt,
+ p_spq->p_phys);
+
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+
+ spin_lock_bh(&p_spq->lock);
+
+ if (list_empty(&p_spq->free_pool)) {
+ p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+ if (!p_ent) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_ent->list);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ spin_unlock_bh(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - adds a new entry to the pending
+ * list. Should be called while the SPQ lock is held.
+ *
+ * Adds the entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places it in the
+ * unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ struct qed_spq_entry *p_en2;
+
+ if (list_empty(&p_spq->free_pool)) {
+ list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return 0;
+ }
+
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_en2->list);
+
+ /* Struct assignment */
+ *p_en2 = *p_ent;
+
+ kfree(p_ent);
+
+ p_ent = p_en2;
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case QED_SPQ_PRIORITY_NORMAL:
+ list_add_tail(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case QED_SPQ_PRIORITY_HIGH:
+ list_add(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+ struct list_head *head,
+ u32 keep_reserve)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ int rc;
+
+ while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !list_empty(head)) {
+ struct qed_spq_entry *p_ent =
+ list_first_entry(head, struct qed_spq_entry, list);
+ list_del(&p_ent->list);
+ list_add_tail(&p_ent->list, &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ list_del(&p_ent->list);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+
+ while (!list_empty(&p_spq->free_pool)) {
+ if (list_empty(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = list_first_entry(&p_spq->unlimited_pending,
+ struct qed_spq_entry,
+ list);
+ if (!p_ent)
+ return -EINVAL;
+
+ list_del(&p_ent->list);
+
+ qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return qed_spq_post_list(p_hwfn, &p_spq->pending,
+ SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ int rc = 0;
+ struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ /* Complete the entry */
+ rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+ spin_lock_bh(&p_spq->lock);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = qed_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; No need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ spin_unlock_bh(&p_spq->lock);
+
+ if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ /* For entries in QED BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ spin_lock_bh(&p_spq->lock);
+ list_del(&p_ent->list);
+ qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct qed_spq *p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_spq_entry *tmp;
+ struct qed_spq_entry *found = NULL;
+ int rc;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return -EINVAL;
+
+ spin_lock_bh(&p_spq->lock);
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+ list) {
+ if (p_ent->elem.hdr.echo == echo) {
+ list_del(&p_ent->list);
+
+ qed_chain_return_produced(&p_spq->chain);
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ spin_unlock_bh(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an entry this EQE completes\n");
+ return -EEXIST;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+ /* EBLOCK is responsible for freeing its own entry */
+ qed_spq_return_entry(p_hwfn, found);
+
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize ConsQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_PAGE_SIZE / 0x80,
+ 0x80,
+ &p_consq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+ goto consq_allocate_fail;
+ }
+
+ return p_consq;
+
+consq_allocate_fail:
+ qed_consq_free(p_hwfn, p_consq);
+ return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ if (!p_consq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+ kfree(p_consq);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644
index 0000000..06ff90d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644
index 0000000..ea00d5f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -0,0 +1,285 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION 8
+#define QEDE_MINOR_VERSION 4
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
+ __stringify(QEDE_MINOR_VERSION) "." \
+ __stringify(QEDE_REVISION_VERSION) "." \
+ __stringify(QEDE_ENGINEERING_VERSION)
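+/* With the values above, DRV_MODULE_VERSION expands to "8.4.0.0". */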
+
+#define QEDE_ETH_INTERFACE_VERSION 300
+
+#define DRV_MODULE_SYM qede
+
+struct qede_stats {
+ u64 no_buff_discards;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 coalesced_pkts;
+ u64 coalesced_events;
+ u64 coalesced_aborts_num;
+ u64 non_coalesced_pkts;
+ u64 coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_127_byte_packets;
+ u64 rx_255_byte_packets;
+ u64 rx_511_byte_packets;
+ u64 rx_1023_byte_packets;
+ u64 rx_1518_byte_packets;
+ u64 rx_1522_byte_packets;
+ u64 rx_2047_byte_packets;
+ u64 rx_4095_byte_packets;
+ u64 rx_9216_byte_packets;
+ u64 rx_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_dev {
+ struct qed_dev *cdev;
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ u32 dp_module;
+ u8 dp_level;
+
+ const struct qed_eth_ops *ops;
+
+ struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+ struct qede_fastpath *fp_array;
+ u16 num_rss;
+ u8 num_tc;
+#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
+ (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx) \
+ (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (edev), (txqidx))])
+
+ struct qed_int_info int_info;
+ unsigned char primary_mac[ETH_ALEN];
+
+ /* Smaller private variant of the RTNL lock */
+ struct mutex qede_lock;
+ u32 state; /* Protected by qede_lock */
+ u16 rx_buf_size;
+ /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+ /* Max supported alignment is 256 (8 shift)
+ * minimal alignment shift 6 is optimal for 57xxx HW performance
+ */
+#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
+ /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define QEDE_FW_RX_ALIGN_END \
+ max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
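+ /* Example: with a 64-byte cache line (L1_CACHE_SHIFT == 6),
+ * QEDE_RX_ALIGN_SHIFT is max(6, min(8, 6)) == 6, i.e. RX buffers
+ * are aligned to 64 bytes.
+ */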
+
+ struct qede_stats stats;
+ struct qed_update_vport_rss_params rss_params;
+ u16 q_num_rx_buffers; /* Must be a power of two */
+ u16 q_num_tx_buffers; /* Must be a power of two */
+
+ struct delayed_work sp_task;
+ unsigned long sp_flags;
+};
+
+enum QEDE_STATE {
+ QEDE_STATE_CLOSED,
+ QEDE_STATE_OPEN,
+};
+
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+#define MAX_NUM_TC 8
+#define MAX_NUM_PRI 8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after the frame was DMA-ed.
+ */
+struct sw_rx_data {
+ u8 *data;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct qede_rx_queue {
+ __le16 *hw_cons_ptr;
+ struct sw_rx_data *sw_rx_ring;
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring;
+ void __iomem *hw_rxq_prod_addr;
+
+ int rx_buf_size;
+
+ u16 num_rx_buffers;
+ u16 rxq_id;
+
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ u32 raw;
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD BIT(0)
+};
+
+struct qede_tx_queue {
+ int index; /* Queue index */
+ __le16 *hw_cons_ptr;
+ struct sw_tx_bd *sw_tx_ring;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ struct qed_chain tx_pbl;
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+
+ u16 num_tx_buffers;
+};
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
+ le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
+ (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
+ (bd)->nbytes = cpu_to_le16(len); \
+ } while (0)
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
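+
+/* Illustrative use of the BD address helpers (a sketch, not driver code):
+ *
+ *	dma_addr_t map = dma_map_single(&edev->pdev->dev, data, len,
+ *					DMA_TO_DEVICE);
+ *	BD_SET_UNMAP_ADDR_LEN(bd, map, len);
+ *	...
+ *	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+ *			 BD_UNMAP_LEN(bd), DMA_TO_DEVICE);
+ */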
+
+struct qede_fastpath {
+ struct qede_dev *edev;
+ u8 rss_id;
+ struct napi_struct napi;
+ struct qed_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txqs;
+
+#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[VEC_NAME_SIZE];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN 0
+#define XMIT_L4_CSUM BIT(0)
+#define XMIT_LSO BIT(1)
+#define XMIT_ENC BIT(2)
+
+#define QEDE_CSUM_ERROR BIT(0)
+#define QEDE_CSUM_UNNECESSARY BIT(1)
+
+#define QEDE_SP_RX_MODE 1
+
+union qede_reload_args {
+ u16 mtu;
+};
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *edev,
+ union qede_reload_args *args),
+ union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+
+#define RX_RING_SIZE_POW 13
+#define RX_RING_SIZE BIT(RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
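+/* RX_RING_SIZE_POW 13 yields an 8192-entry BD ring; NUM_RX_BDS_MAX (8191)
+ * doubles as the wrap mask applied to sw_rx_cons/sw_rx_prod throughout
+ * the Rx path.
+ */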
+
+#define TX_RING_SIZE_POW 13
+#define TX_RING_SIZE BIT(TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+
+#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644
index 0000000..3a36247
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -0,0 +1,385 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+ {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rx_hw_errors),
+ QEDE_RQSTAT(rx_alloc_errors),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+ qede_rqstats_arr[(sindex)].offset)))
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+ bool pf_only;
+} qede_stats_arr[] = {
+ QEDE_STAT(rx_ucast_bytes),
+ QEDE_STAT(rx_mcast_bytes),
+ QEDE_STAT(rx_bcast_bytes),
+ QEDE_STAT(rx_ucast_pkts),
+ QEDE_STAT(rx_mcast_pkts),
+ QEDE_STAT(rx_bcast_pkts),
+
+ QEDE_STAT(tx_ucast_bytes),
+ QEDE_STAT(tx_mcast_bytes),
+ QEDE_STAT(tx_bcast_bytes),
+ QEDE_STAT(tx_ucast_pkts),
+ QEDE_STAT(tx_mcast_pkts),
+ QEDE_STAT(tx_bcast_pkts),
+
+ QEDE_PF_STAT(rx_64_byte_packets),
+ QEDE_PF_STAT(rx_127_byte_packets),
+ QEDE_PF_STAT(rx_255_byte_packets),
+ QEDE_PF_STAT(rx_511_byte_packets),
+ QEDE_PF_STAT(rx_1023_byte_packets),
+ QEDE_PF_STAT(rx_1518_byte_packets),
+ QEDE_PF_STAT(rx_1522_byte_packets),
+ QEDE_PF_STAT(rx_2047_byte_packets),
+ QEDE_PF_STAT(rx_4095_byte_packets),
+ QEDE_PF_STAT(rx_9216_byte_packets),
+ QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(tx_64_byte_packets),
+ QEDE_PF_STAT(tx_65_to_127_byte_packets),
+ QEDE_PF_STAT(tx_128_to_255_byte_packets),
+ QEDE_PF_STAT(tx_256_to_511_byte_packets),
+ QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+ QEDE_PF_STAT(rx_mac_crtl_frames),
+ QEDE_PF_STAT(tx_mac_ctrl_frames),
+ QEDE_PF_STAT(rx_pause_frames),
+ QEDE_PF_STAT(tx_pause_frames),
+ QEDE_PF_STAT(rx_pfc_frames),
+ QEDE_PF_STAT(tx_pfc_frames),
+
+ QEDE_PF_STAT(rx_crc_errors),
+ QEDE_PF_STAT(rx_align_errors),
+ QEDE_PF_STAT(rx_carrier_errors),
+ QEDE_PF_STAT(rx_oversize_packets),
+ QEDE_PF_STAT(rx_jabbers),
+ QEDE_PF_STAT(rx_undersize_packets),
+ QEDE_PF_STAT(rx_fragments),
+ QEDE_PF_STAT(tx_lpi_entry_count),
+ QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_STAT(brb_truncates),
+ QEDE_PF_STAT(brb_discards),
+ QEDE_STAT(no_buff_discards),
+ QEDE_PF_STAT(mftag_filter_discards),
+ QEDE_PF_STAT(mac_filter_discards),
+ QEDE_STAT(tx_err_drop_pkts),
+
+ QEDE_STAT(coalesced_pkts),
+ QEDE_STAT(coalesced_events),
+ QEDE_STAT(coalesced_aborts_num),
+ QEDE_STAT(non_coalesced_pkts),
+ QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+ (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+ + qede_stats_arr[(index)].offset)))
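+/* QEDE_STATS_DATA(dev, i) reads the i-th exported counter straight out of
+ * dev->stats via the offset recorded in qede_stats_arr[]; adding a counter
+ * only requires a struct qede_stats field plus a table entry above.
+ */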
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+ int i, j, k;
+
+ for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_stats_arr[i].string);
+ j++;
+ }
+
+ for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qede_get_strings_stats(edev, buf);
+ break;
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int sidx, cnt = 0;
+ int qid;
+
+ qede_fill_by_demand_stats(edev);
+
+ mutex_lock(&edev->qede_lock);
+
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+ buf[cnt] = 0;
+ for (qid = 0; qid < edev->num_rss; qid++)
+ buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+ cnt++;
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int num_stats = QEDE_NUM_STATS;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return num_stats + QEDE_NUM_RQSTATS;
+
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ return -EINVAL;
+ }
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+	memset(&current_link, 0, sizeof(current_link));
+	edev->ops->common->get_link(edev->cdev, &current_link);
+
+ cmd->supported = current_link.supported_caps;
+ cmd->advertising = current_link.advertised_caps;
+ if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+ ethtool_cmd_speed_set(cmd, current_link.speed);
+ cmd->duplex = current_link.duplex;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ }
+ cmd->port = current_link.port;
+ cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+ cmd->lp_advertising = current_link.lp_caps;
+
+ return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+ u32 speed;
+
+ if (edev->dev_info.common.is_mf) {
+ DP_INFO(edev,
+ "Link parameters can not be changed in MF mode\n");
+ return -EOPNOTSUPP;
+ }
+
+	memset(&current_link, 0, sizeof(current_link));
+	memset(&params, 0, sizeof(params));
+	edev->ops->common->get_link(edev->cdev, &current_link);
+
+ speed = ethtool_cmd_speed(cmd);
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ params.autoneg = true;
+ params.forced_speed = 0;
+ params.adv_speeds = cmd->advertising;
+ } else { /* forced speed */
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+ params.autoneg = false;
+ params.forced_speed = speed;
+ switch (speed) {
+ case SPEED_10000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_10000baseKR_Full)) {
+ DP_INFO(edev, "10G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ break;
+ case SPEED_40000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_40000baseLR4_Full)) {
+ DP_INFO(edev, "40G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ break;
+ default:
+ DP_INFO(edev, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+ }
+
+ params.link_up = true;
+	edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ strlcpy(info->driver, "qede", sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+ snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ edev->dev_info.common.fw_major,
+ edev->dev_info.common.fw_minor,
+ edev->dev_info.common.fw_rev,
+ edev->dev_info.common.fw_eng);
+
+ snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+ edev->dev_info.common.mfw_rev & 0xFF);
+
+ if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
+ sizeof(info->fw_version)) {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "mfw %s storm %s", mfw, storm);
+ } else {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s %s", mfw, storm);
+ }
+
+ strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+ edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(level, &dp_module, &dp_level);
+
+ edev->dp_level = dp_level;
+ edev->dp_module = dp_module;
+ edev->ops->common->update_msglvl(edev->cdev,
+ dp_module, dp_level);
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+	memset(&current_link, 0, sizeof(current_link));
+	edev->ops->common->get_link(edev->cdev, &current_link);
+
+ return current_link.link_up;
+}
+
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+ edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define ETH_MIN_PACKET_SIZE 60
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ union qede_reload_args args;
+
+ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ DP_ERR(edev, "Can't support requested MTU size\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Configuring MTU size of %d\n", new_mtu);
+
+	/* Set the mtu field and re-start the interface if needed */
+ args.mtu = new_mtu;
+
+ if (netif_running(edev->ndev))
+ qede_reload(edev, &qede_update_mtu, &args);
+
+ qede_update_mtu(edev, &args);
+
+ return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .set_settings = qede_set_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_sset_count = qede_get_sset_count,
+
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644
index 0000000..f4657a2
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,2584 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/vxlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
+ DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_10 0x1635
+#define CHIP_NUM_57980S_MF 0x1636
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_57980S_25 0x1656
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#endif
+
+static const struct pci_device_id qede_pci_tbl[] = {
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT (5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+static struct pci_driver qede_pci_driver = {
+ .name = "qede",
+ .id_table = qede_pci_tbl,
+ .probe = qede_probe,
+ .remove = qede_remove,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+ {
+ .link_update = qede_link_update,
+ },
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_drvinfo drvinfo;
+ struct qede_dev *edev;
+
+ /* Currently only support name change */
+ if (event != NETDEV_CHANGENAME)
+ goto done;
+
+ /* Check whether this is a qede device */
+ if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+ goto done;
+
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+ if (strcmp(drvinfo.driver, "qede"))
+ goto done;
+ edev = netdev_priv(ndev);
+
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+ "qede");
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+ .notifier_call = qede_netdev_event,
+};
+
+static
+int __init qede_init(void)
+{
+ int ret;
+ u32 qed_ver;
+
+ pr_notice("qede_init: %s\n", version);
+
+ qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+ if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+ pr_notice("Version mismatch [%08x != %08x]\n",
+ qed_ver,
+ QEDE_ETH_INTERFACE_VERSION);
+ return -EINVAL;
+ }
+
+ qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ if (!qed_ops) {
+ pr_notice("Failed to get qed ethtool operations\n");
+ return -EINVAL;
+ }
+
+ /* Must register notifier before pci ops, since we might miss
+	 * interface rename after pci probe and netdev registration.
+ */
+ ret = register_netdevice_notifier(&qede_netdev_notifier);
+ if (ret) {
+ pr_notice("Failed to register netdevice_notifier\n");
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ ret = pci_register_driver(&qede_pci_driver);
+ if (ret) {
+ pr_notice("Failed to register driver\n");
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+ pr_notice("qede_cleanup called\n");
+
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ pci_unregister_driver(&qede_pci_driver);
+ qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ int *len)
+{
+ u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_bd *tx_data_bd;
+ int bds_consumed = 0;
+ int nbds;
+ bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+ int i, split_bd_len = 0;
+
+ if (unlikely(!skb)) {
+ DP_ERR(edev,
+ "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+ idx, txq->sw_tx_cons, txq->sw_tx_prod);
+ return -1;
+ }
+
+ *len = skb->len;
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ bds_consumed++;
+
+ nbds = first_bd->data.nbds;
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ bds_consumed++;
+ }
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ while (bds_consumed++ < nbds)
+ qed_chain_consume(&txq->tx_pbl);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+
+ return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *first_bd,
+ int nbd,
+ bool data_split)
+{
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_bd *tx_data_bd;
+ int i, split_bd_len = 0;
+
+ /* Return prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ nbd--;
+ }
+
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < nbd; i++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ if (tx_data_bd->nbytes)
+ dma_unmap_page(&edev->pdev->dev,
+ BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ /* Return again prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct qede_dev *edev,
+ struct sk_buff *skb,
+ int *ipv6_ext)
+{
+ u32 rc = XMIT_L4_CSUM;
+ __be16 l3_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ l3_proto = vlan_get_protocol(skb);
+ if (l3_proto == htons(ETH_P_IPV6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *ipv6_ext = 1;
+
+ if (skb_is_gso(skb))
+ rc |= XMIT_LSO;
+
+ return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+ struct eth_tx_2nd_bd *second_bd,
+ struct eth_tx_3rd_bd *third_bd)
+{
+ u8 l4_proto;
+ u16 bd2_bits = 0, bd2_bits2 = 0;
+
+ bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+ bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+ bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ else
+ l4_proto = ip_hdr(skb)->protocol;
+
+ if (l4_proto == IPPROTO_UDP)
+ bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+ if (third_bd) {
+ third_bd->data.bitfields |=
+ ((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
+ }
+
+ second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_dev *edev,
+ skb_frag_t *frag,
+ struct eth_tx_bd *bd)
+{
+ dma_addr_t mapping;
+
+ /* Map skb non-linear frag data for DMA */
+ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the data pointer of the frag data */
+ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+ return 0;
+}
+
+/* Main transmit function */
+static
+netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct netdev_queue *netdev_txq;
+ struct qede_tx_queue *txq;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_2nd_bd *second_bd = NULL;
+ struct eth_tx_3rd_bd *third_bd = NULL;
+ struct eth_tx_bd *tx_data_bd = NULL;
+ u16 txq_index;
+ u8 nbd = 0;
+ dma_addr_t mapping;
+ int rc, frag_idx = 0, ipv6_ext = 0;
+ u8 xmit_type;
+ u16 idx;
+ u16 hlen;
+	bool data_split = false;
+
+ /* Get tx-queue context and netdev index */
+ txq_index = skb_get_queue_mapping(skb);
+ WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ txq = QEDE_TX_QUEUE(edev, txq_index);
+ netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+ /* Current code doesn't support SKB linearization, since the max number
+ * of skb frags can be passed in the FW HSI.
+ */
+ BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
+
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+ (MAX_SKB_FRAGS + 1));
+
+ xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = (struct eth_tx_1st_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ return NETDEV_TX_OK;
+ }
+ nbd++;
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* In case there is IPv6 with extension headers or LSO we need 2nd and
+ * 3rd BDs.
+ */
+ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+ second_bd = (struct eth_tx_2nd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(second_bd, 0, sizeof(*second_bd));
+
+ nbd++;
+ third_bd = (struct eth_tx_3rd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(third_bd, 0, sizeof(*third_bd));
+
+ nbd++;
+ /* We need to fill in additional data in second_bd... */
+ tx_data_bd = (struct eth_tx_bd *)second_bd;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Fill the parsing flags & params according to the requested offload */
+ if (xmit_type & XMIT_L4_CSUM) {
+ /* We don't re-calculate IP checksum as it is already done by
+ * the upper stack
+ */
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ /* If the packet is IPv6 with extension header, indicate that
+ * to FW and pass few params, since the device cracker doesn't
+ * support parsing IPv6 with extension header/s.
+ */
+ if (unlikely(ipv6_ext))
+ qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+ }
+
+ if (xmit_type & XMIT_LSO) {
+ first_bd->data.bd_flags.bitfields |=
+ (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+ third_bd->data.lso_mss =
+ cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data;
+
+		/* @@@TBD - if this won't be removed, need to check */
+ third_bd->data.bitfields |=
+ (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+ /* Make life easier for FW guys who can't deal with header and
+ * data on same BD. If we need to split, use the second bd...
+ */
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "TSO split header size is %d (%x:%x)\n",
+ first_bd->nbytes, first_bd->addr.hi,
+ first_bd->addr.lo);
+
+ mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+ le32_to_cpu(first_bd->addr.lo)) +
+ hlen;
+
+ BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+ le16_to_cpu(first_bd->nbytes) -
+ hlen);
+
+ /* this marks the BD as one that has no
+ * individual mapping
+ */
+ txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+ first_bd->nbytes = cpu_to_le16(hlen);
+
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ data_split = true;
+ }
+ }
+
+ /* Handle fragmented skb */
+ /* special handle for frags inside 2nd and 3rd bds.. */
+ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+
+ if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ else
+ tx_data_bd = NULL;
+
+ frag_idx++;
+ }
+
+ /* map last frags into 4th, 5th .... */
+ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+
+ memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = nbd;
+
+ netdev_tx_sent_queue(netdev_txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ /* Advance packet producer only before sending the packet since mapping
+ * of pages may fail.
+ */
+ txq->sw_tx_prod++;
+
+ /* 'next page' entries are counted in the producer value */
+ txq->tx_db.data.bd_prod =
+ cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+ < (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Stop queue was called\n");
+ /* paired memory barrier is in qede_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons
+ */
+ smp_mb();
+
+ if (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1) &&
+ (edev->state == QEDE_STATE_OPEN)) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Wake queue was called\n");
+ }
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+ u16 hw_bd_cons;
+
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+ return 0;
+
+ return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static int qede_tx_int(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+ u16 hw_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ int rc;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ int len = 0;
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ qed_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+ /* Taking tx_lock is needed to prevent reenabling the queue
+		 * while it's empty. This could have happened if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+ u8 tc;
+
+ for (tc = 0; tc < fp->edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ return true;
+ return false;
+}
+
+/* This function copies the Rx buffer from the CONS position to the PROD
+ * position, since we failed to allocate a new Rx buffer.
+ */
+static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+{
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *sw_rx_data_cons =
+ &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ struct sw_rx_data *sw_rx_data_prod =
+ &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(sw_rx_data_cons, mapping));
+
+ sw_rx_data_prod->data = sw_rx_data_cons->data;
+ memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+
+ rxq->sw_rx_cons++;
+ rxq->sw_rx_prod++;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+ u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = {0};
+
+ /* Update producers */
+ rx_prods.bd_prod = cpu_to_le16(bd_prod);
+ rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (u32 *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
+static u32 qede_get_rxhash(struct qede_dev *edev,
+ u8 bitfields,
+ __le32 rss_hash,
+ enum pkt_hash_types *rxhash_type)
+{
+ enum rss_hash_type htype;
+
+ htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+ if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+ *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ return le32_to_cpu(rss_hash);
+ }
+ *rxhash_type = PKT_HASH_TYPE_NONE;
+ return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+ skb_checksum_none_assert(skb);
+
+ if (csum_flag & QEDE_CSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ if (vlan_tag)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+
+ napi_gro_receive(&fp->napi, skb);
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
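+
+/* Summary of the above: a calculated-and-correct L4 checksum yields
+ * QEDE_CSUM_UNNECESSARY, an IP header (or L4 checksum) error yields
+ * QEDE_CSUM_ERROR, and a frame whose L4 checksum was never calculated
+ * returns 0 so the stack verifies it by itself.
+ */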
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_dev *edev = fp->edev;
+ struct qede_rx_queue *rxq = fp->rxq;
+
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+ int rx_pkt = 0;
+ u8 csum_flag;
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative reads of CQE
+ * / BD in the while-loop before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB, and then
+ * the CPU reads the hw_comp_cons, it will use an old CQE.
+ */
+ rmb();
+
+ /* Loop to complete all indicated BDs */
+ while (sw_comp_cons != hw_comp_cons) {
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ enum pkt_hash_types rxhash_type;
+ enum eth_rx_cqe_type cqe_type;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ struct sk_buff *skb;
+ u16 len, pad;
+ u32 rx_hash;
+ u8 *data;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)
+ qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ edev->ops->eth_cqe_completion(
+ edev->cdev, fp->rss_id,
+ (struct eth_slow_path_rx_cqe *)cqe);
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ data = sw_rx_data->data;
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->pkt_len);
+ pad = fp_cqe->placement_offset;
+
+ /* For every Rx BD consumed, we allocate a new BD so the BD ring
+ * is always with a fixed size. If allocation fails, we take the
+ * consumed BD and return it to the ring in the PROD position.
+ * The packet that was received on that BD will be dropped (and
+ * not passed to the upper stack).
+ */
+ if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(sw_rx_data, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ /* If this is an error packet then drop it */
+ parse_flag =
+ le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
+ csum_flag = qede_check_csum(parse_flag);
+ if (csum_flag == QEDE_CSUM_ERROR) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ kfree(data);
+ goto next_rx;
+ }
+
+ skb = build_skb(data, 0);
+
+ if (unlikely(!skb)) {
+ DP_NOTICE(edev,
+ "Build_skb failed, dropping incoming packet\n");
+ kfree(data);
+ rxq->rx_alloc_errors++;
+ goto next_rx;
+ }
+
+ skb_reserve(skb, pad);
+
+ } else {
+ DP_NOTICE(edev,
+ "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
+ qede_reuse_rx_data(rxq);
+ rxq->rx_alloc_errors++;
+ goto next_cqe;
+ }
+
+ sw_rx_data->data = NULL;
+
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+
+ rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+ fp_cqe->rss_hash,
+ &rxhash_type);
+
+ skb_set_hash(skb, rx_hash, rxhash_type);
+
+ qede_set_skb_csum(skb, csum_flag);
+
+ skb_record_rx_queue(skb, fp->rss_id);
+
+ qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+
+next_rx:
+ rxq->sw_rx_cons++;
+ rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ /* CR TPA - revisit how to handle budget in TPA perhaps
+ * increase on "end"
+ */
+ if (rx_pkt == budget)
+ break;
+ } /* repeat while sw_comp_cons != hw_comp_cons... */
+
+ /* Update producers */
+ qede_update_rx_prod(edev, rxq);
+
+ return rx_pkt;
+}
+
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+ struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+ napi);
+ struct qede_dev *edev = fp->edev;
+
+ while (1) {
+ u8 tc;
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ if (qede_has_rx_work(fp->rxq)) {
+ work_done += qede_rx_int(fp, budget - work_done);
+
+ /* must not complete if we consumed full budget */
+ if (work_done >= budget)
+ break;
+ }
+
+ /* Fall out from the NAPI loop if needed */
+ if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ break;
+ }
+ }
+ }
+
+ return work_done;
+}
+
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct qede_fastpath *fp = fp_cookie;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ napi_schedule_irqoff(&fp->napi);
+ return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char mac[ETH_ALEN])
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.mac_valid = 1;
+ ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
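+
+/* Illustrative call (a sketch; actual callers sit in the open/rx-mode
+ * paths later in this file), assuming the QED_FILTER_XCAST_TYPE_ADD
+ * opcode from the qed filter API:
+ *
+ *	qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ *			      edev->ndev->dev_addr);
+ */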
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+ struct qed_eth_stats stats;
+
+ edev->ops->get_vport_stats(edev->cdev, &stats);
+ edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+ edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+ edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+ edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+ edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+ edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+ edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+ edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+ edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+ edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+ edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+ edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+ edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+ edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+ edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+ edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+ edev->stats.coalesced_events = stats.tpa_coalesced_events;
+ edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+ edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+ edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+ edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+ edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
+ edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
+ edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
+ edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
+ edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
+ edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
+ edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
+ edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
+ edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
+ edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_crc_errors = stats.rx_crc_errors;
+ edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+ edev->stats.rx_pause_frames = stats.rx_pause_frames;
+ edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+ edev->stats.rx_align_errors = stats.rx_align_errors;
+ edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+ edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+ edev->stats.rx_jabbers = stats.rx_jabbers;
+ edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+ edev->stats.rx_fragments = stats.rx_fragments;
+ edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+ edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+ edev->stats.tx_128_to_255_byte_packets =
+ stats.tx_128_to_255_byte_packets;
+ edev->stats.tx_256_to_511_byte_packets =
+ stats.tx_256_to_511_byte_packets;
+ edev->stats.tx_512_to_1023_byte_packets =
+ stats.tx_512_to_1023_byte_packets;
+ edev->stats.tx_1024_to_1518_byte_packets =
+ stats.tx_1024_to_1518_byte_packets;
+ edev->stats.tx_1519_to_2047_byte_packets =
+ stats.tx_1519_to_2047_byte_packets;
+ edev->stats.tx_2048_to_4095_byte_packets =
+ stats.tx_2048_to_4095_byte_packets;
+ edev->stats.tx_4096_to_9216_byte_packets =
+ stats.tx_4096_to_9216_byte_packets;
+ edev->stats.tx_9217_to_16383_byte_packets =
+ stats.tx_9217_to_16383_byte_packets;
+ edev->stats.tx_pause_frames = stats.tx_pause_frames;
+ edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+ edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+ edev->stats.tx_total_collisions = stats.tx_total_collisions;
+ edev->stats.brb_truncates = stats.brb_truncates;
+ edev->stats.brb_discards = stats.brb_discards;
+ edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(
+ struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ qede_fill_by_demand_stats(edev);
+
+ stats->rx_packets = edev->stats.rx_ucast_pkts +
+ edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+ stats->tx_packets = edev->stats.tx_ucast_pkts +
+ edev->stats.tx_mcast_pkts +
+ edev->stats.tx_bcast_pkts;
+
+ stats->rx_bytes = edev->stats.rx_ucast_bytes +
+ edev->stats.rx_mcast_bytes +
+ edev->stats.rx_bcast_bytes;
+
+ stats->tx_bytes = edev->stats.tx_ucast_bytes +
+ edev->stats.tx_mcast_bytes +
+ edev->stats.tx_bcast_bytes;
+
+ stats->tx_errors = edev->stats.tx_err_drop_pkts;
+ stats->multicast = edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+
+ stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+ stats->collisions = edev->stats.tx_total_collisions;
+ stats->rx_crc_errors = edev->stats.rx_crc_errors;
+ stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+ return stats;
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_get_stats64 = qede_get_stats64,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+ struct pci_dev *pdev,
+ struct qed_dev_eth_info *info,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct net_device *ndev;
+ struct qede_dev *edev;
+
+ ndev = alloc_etherdev_mqs(sizeof(*edev),
+ info->num_queues,
+ info->num_queues);
+ if (!ndev) {
+ pr_err("etherdev allocation failed\n");
+ return NULL;
+ }
+
+ edev = netdev_priv(ndev);
+ edev->ndev = ndev;
+ edev->cdev = cdev;
+ edev->pdev = pdev;
+ edev->dp_module = dp_module;
+ edev->dp_level = dp_level;
+ edev->ops = qed_ops;
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, info, sizeof(*info));
+
+ edev->num_tc = edev->dev_info.num_tc;
+
+ return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+ struct net_device *ndev = edev->ndev;
+ struct pci_dev *pdev = edev->pdev;
+ u32 hw_features;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ndev->mem_start = edev->dev_info.common.pci_mem_start;
+ ndev->base_addr = ndev->mem_start;
+ ndev->mem_end = edev->dev_info.common.pci_mem_end;
+ ndev->irq = edev->dev_info.common.pci_irq;
+
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ ndev->netdev_ops = &qede_netdev_ops;
+
+ qede_set_ethtool_ops(ndev);
+
+	/* user-changeable features */
+ hw_features = NETIF_F_GRO | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+ ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ ndev->hw_features = hw_features;
+
+ /* Set network device HW mac */
+ ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts from 32b param to two params of level and module
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking the specific flow in low level.
+ *
+ * Notice that the level should be that of the lowest required logs.
+ */
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+ *p_dp_level = QED_LEVEL_NOTICE;
+ *p_dp_module = 0;
+
+ if (debug & QED_LOG_VERBOSE_MASK) {
+ *p_dp_level = QED_LEVEL_VERBOSE;
+ *p_dp_module = (debug & 0x3FFFFFFF);
+ } else if (debug & QED_LOG_INFO_MASK) {
+ *p_dp_level = QED_LEVEL_INFO;
+ } else if (debug & QED_LOG_NOTICE_MASK) {
+ *p_dp_level = QED_LEVEL_NOTICE;
+ }
+}
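+
+/* Example (illustrative, following the bit layout documented above):
+ * "modprobe qede debug=0x40000000" selects QED_LEVEL_INFO, while
+ * "debug=0x1" selects QED_LEVEL_VERBOSE with module bit 0 set.
+ */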
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+ if (edev->fp_array) {
+ struct qede_fastpath *fp;
+ int i;
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ kfree(fp->sb_info);
+ kfree(fp->rxq);
+ kfree(fp->txqs);
+ }
+ kfree(edev->fp_array);
+ }
+ edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+ struct qede_fastpath *fp;
+ int i;
+
+ edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ sizeof(*edev->fp_array), GFP_KERNEL);
+ if (!edev->fp_array) {
+ DP_NOTICE(edev, "fp array allocation failed\n");
+ goto err;
+ }
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ if (!fp->sb_info) {
+ DP_NOTICE(edev, "sb info struct allocation failed\n");
+ goto err;
+ }
+
+ fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev, "RXQ struct allocation failed\n");
+ goto err;
+ }
+
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev, "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ qede_free_fp_array(edev);
+ return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ sp_task.work);
+ mutex_lock(&edev->qede_lock);
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ qede_config_rx_mode(edev->ndev);
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+ struct qed_pf_params pf_params;
+
+ /* 16 rx + 16 tx */
+ memset(&pf_params, 0, sizeof(struct qed_pf_params));
+ pf_params.eth_pf_params.num_cons = 32;
+ qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+ QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ enum qede_probe_mode mode)
+{
+ struct qed_slowpath_params params;
+ struct qed_dev_eth_info dev_info;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+ int rc;
+
+ if (unlikely(dp_level & QED_LEVEL_INFO))
+ pr_notice("Starting qede probe\n");
+
+ cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
+ dp_module, dp_level);
+ if (!cdev) {
+ rc = -ENODEV;
+ goto err0;
+ }
+
+ qede_update_pf_params(cdev);
+
+ /* Start the Slowpath-process */
+	memset(&params, 0, sizeof(struct qed_slowpath_params));
+ params.int_mode = QED_INT_MODE_MSIX;
+ params.drv_major = QEDE_MAJOR_VERSION;
+ params.drv_minor = QEDE_MINOR_VERSION;
+ params.drv_rev = QEDE_REVISION_VERSION;
+ params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+	rc = qed_ops->common->slowpath_start(cdev, &params);
+ if (rc) {
+ pr_notice("Cannot start slowpath\n");
+ goto err1;
+ }
+
+ /* Learn information crucial for qede to progress */
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto err2;
+
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ qede_init_ndev(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err3;
+ }
+
+ edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+ edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+
+ DP_INFO(edev, "Ending successfully qede probe\n");
+
+ return 0;
+
+err3:
+ free_netdev(edev->ndev);
+err2:
+ qed_ops->common->slowpath_stop(cdev);
+err1:
+ qed_ops->common->remove(cdev);
+err0:
+ return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(debug, &dp_module, &dp_level);
+
+ return __qede_probe(pdev, dp_module, dp_level,
+ QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+ QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_INFO(edev, "Starting qede_remove\n");
+
+ cancel_delayed_work_sync(&edev->sp_task);
+ unregister_netdev(ndev);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(ndev);
+
+ /* Use global ops since we've freed edev */
+ qed_ops->common->slowpath_stop(cdev);
+ qed_ops->common->remove(cdev);
+
+ pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+ int rc;
+ u16 rss_num;
+
+	/* Setup queues according to possible resources */
+ rss_num = netif_get_num_default_rss_queues() *
+ edev->dev_info.common.num_hwfns;
+
+ rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+ rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+ if (rc > 0) {
+ /* Managed to request interrupts for our queues */
+ edev->num_rss = rc;
+ DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+ QEDE_RSS_CNT(edev), rss_num);
+ rc = 0;
+ }
+ return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info)
+{
+ if (sb_info->sb_virt)
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+ sizeof(*sb_virt),
+ &sb_phys, GFP_KERNEL);
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 i;
+
+ for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+ struct sw_rx_data *rx_buf;
+ u8 *data;
+
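+ /* NUM_RX_BDS_MAX is used as an index mask here, which assumes
+ * the SW ring size is a power of two (mask = size - 1).
+ */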
+ rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+ data = rx_buf->data;
+
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->data = NULL;
+ kfree(data);
+ }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ /* Free rx buffers */
+ qede_free_rx_buffers(edev, rxq);
+
+ /* Free the parallel SW ring */
+ kfree(rxq->sw_rx_ring);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ u16 rx_buf_size;
+ u8 *data;
+
+ rx_buf_size = rxq->rx_buf_size;
+
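+ /* GFP_ATOMIC: this helper is presumably shared with the Rx ring
+ * refill path, where sleeping is not allowed.
+ */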
+ data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ if (unlikely(!data)) {
+ DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(&edev->pdev->dev, data,
+ rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ kfree(data);
+ DP_NOTICE(edev, "Failed to map Rx buffer\n");
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->data = data;
+
+ dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+
+ return 0;
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ int i, rc, size, num_allocated;
+
+ rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
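+ /* Buffer must fit IP alignment padding, the Ethernet overhead
+ * (ETH_OVERHEAD presumably covers header, VLAN tag and FCS),
+ * the MTU, and the FW-required tail alignment.
+ */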
+ rxq->rx_buf_size = NET_IP_ALIGN +
+ ETH_OVERHEAD +
+ edev->ndev->mtu +
+ QEDE_FW_RX_ALIGN_END;
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!rxq->sw_rx_ring) {
+ DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ NUM_RX_BDS_MAX,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring);
+
+ if (rc)
+ goto err;
+
+ /* Allocate FW completion ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ NUM_RX_BDS_MAX,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring);
+ if (rc)
+ goto err;
+
+ /* Allocate buffers for the Rx ring */
+ for (i = 0; i < rxq->num_rx_buffers; i++) {
+ rc = qede_alloc_rx_buffer(edev, rxq);
+ if (rc)
+ break;
+ }
+ num_allocated = i;
+ if (!num_allocated) {
+ DP_ERR(edev, "Rx buffers allocation failed\n");
+ goto err;
+ } else if (num_allocated < rxq->num_rx_buffers) {
+ DP_NOTICE(edev,
+ "Allocated fewer buffers than desired (%d allocated)\n",
+ num_allocated);
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_rxq(edev, rxq);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ /* Free the parallel SW ring */
+ kfree(txq->sw_tx_ring);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ int size, rc;
+ union eth_tx_bd_types *p_virt;
+
+ txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+ /* Allocate the parallel driver ring for Tx buffers */
+ size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring) {
+ DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ NUM_TX_BDS_MAX,
+ sizeof(*p_virt),
+ &txq->tx_pbl);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ qede_free_mem_txq(edev, txq);
+ return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int tc;
+
+ qede_free_mem_sb(edev, fp->sb_info);
+
+ qede_free_mem_rxq(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * that contains a status block, one Rx queue and multiple per-TC Tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int rc, tc;
+
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+ if (rc)
+ goto err;
+
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ if (rc)
+ goto err;
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_fp(edev, fp);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ qede_free_mem_fp(edev, fp);
+ }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+ int rc = 0, rss_id;
+
+ for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+ rc = qede_alloc_mem_fp(edev, fp);
+ if (rc)
+ break;
+ }
+
+ if (rss_id != QEDE_RSS_CNT(edev)) {
+ /* Failed allocating memory for all the queues */
+ if (!rss_id) {
+ DP_ERR(edev,
+ "Failed to allocate memory for the leading queue\n");
+ return -ENOMEM;
+ }
+ DP_NOTICE(edev,
+ "Failed to allocate memory for all RSS queues. Desired: %d queues, allocated: %d queues\n",
+ QEDE_RSS_CNT(edev), rss_id);
+ edev->num_rss = rss_id;
+ }
+
+ return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+ int rss_id, txq_index, tc;
+ struct qede_fastpath *fp;
+
+ for_each_rss(rss_id) {
+ fp = &edev->fp_array[rss_id];
+
+ fp->edev = edev;
+ fp->rss_id = rss_id;
+
+ memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+ memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rss_id;
+
+ memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
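+ /* Flatten (tc, rss_id) into a linear Tx queue index; Tx queues
+ * are grouped by traffic class. The same formula is used again
+ * in qede_start_queues().
+ */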
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+ fp->txqs[tc].index = txq_index;
+ }
+
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ edev->ndev->name, rss_id);
+ }
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+
+ netif_napi_del(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rss(i) {
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+ qede_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+ int i;
+
+ for (i = 0; i < edev->int_info.used_cnt; i++) {
+ if (edev->int_info.msix_cnt) {
+ synchronize_irq(edev->int_info.msix[i].vector);
+ free_irq(edev->int_info.msix[i].vector,
+ &edev->fp_array[i]);
+ } else {
+ edev->ops->common->simd_handler_clean(edev->cdev, i);
+ }
+ }
+
+ edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+ int i, rc;
+
+ /* Sanitize number of interrupts == number of prepared RSS queues */
+ if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ DP_ERR(edev,
+ "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+ QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ rc = request_irq(edev->int_info.msix[i].vector,
+ qede_msix_fp_int, 0, edev->fp_array[i].name,
+ &edev->fp_array[i]);
+ if (rc) {
+ DP_ERR(edev, "Request fp %d irq failed\n", i);
+ qede_sync_free_irqs(edev);
+ return rc;
+ }
+ DP_VERBOSE(edev, NETIF_MSG_INTR,
+ "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+ edev->fp_array[i].name, i,
+ &edev->fp_array[i]);
+ edev->int_info.used_cnt++;
+ }
+
+ return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+ struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+ napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+ int i, rc = 0;
+
+ /* Learn Interrupt configuration */
+ rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+ if (rc)
+ return rc;
+
+ if (edev->int_info.msix_cnt) {
+ rc = qede_req_msix_irqs(edev);
+ if (rc)
+ return rc;
+ edev->ndev->irq = edev->int_info.msix[0].vector;
+ } else {
+ const struct qed_common_ops *ops;
+
+ /* qed should be given the RSS ids and callbacks */
+ ops = edev->ops->common;
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ ops->simd_handler_config(edev->cdev,
+ &edev->fp_array[i], i,
+ qede_simd_fp_handler);
+ edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ }
+ return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ bool allow_drain)
+{
+ int rc, cnt = 1000;
+
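+ /* Up to roughly 1-2 seconds of polling: 1000 iterations of
+ * usleep_range(1000, 2000) below.
+ */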
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ if (!cnt) {
+ if (allow_drain) {
+ DP_NOTICE(edev,
+ "Tx queue[%d] is stuck, requesting MCP to drain\n",
+ txq->index);
+ rc = edev->ops->common->drain(edev->cdev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(edev, txq, false);
+ }
+ DP_NOTICE(edev,
+ "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+ txq->index, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -ENODEV;
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qed_dev *cdev = edev->cdev;
+ int rc, tc, i;
+
+ /* Disable the vport */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 0;
+ vport_update_params.update_rss_flg = 0;
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
+ }
+
+ /* Stop all queues in reverse order */
+ for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ struct qed_stop_rxq_params rx_params;
+
+ /* Stop the Tx queue(s) */
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+
+ tx_params.rss_id = i;
+ tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
+ }
+
+ /* Stop the Rx queue */
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = i;
+
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+
+ /* Stop the vport */
+ rc = edev->ops->vport_stop(cdev, 0);
+ if (rc)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev)
+{
+ int rc, tc, i;
+ int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ struct qed_dev *cdev = edev->cdev;
+ struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
+ struct qed_update_vport_params vport_update_params;
+ struct qed_queue_start_common_params q_params;
+
+ if (!edev->num_rss) {
+ DP_ERR(edev,
+ "Cannot set V-PORT active - there are no Rx queues\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->vport_start(cdev, vport_id,
+ edev->ndev->mtu,
+ drop_ttl0_flg,
+ vlan_removal_en);
+
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+ vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+ dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ phys_table,
+ fp->rxq->rx_comp_ring.page_cnt,
+ &fp->rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+ return rc;
+ }
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+ qede_update_rx_prod(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+ int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = txq_index;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = edev->ops->q_tx_start(cdev, &q_params,
+ txq->tx_pbl.pbl.p_phys_table,
+ txq->tx_pbl.page_cnt,
+ &txq->doorbell_addr);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n",
+ txq_index, rc);
+ return rc;
+ }
+
+ txq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport_id;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 1;
+
+ /* Fill struct with RSS params */
+ if (QEDE_RSS_CNT(edev) > 1) {
+ vport_update_params.update_rss_flg = 1;
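+ /* 128 is presumably the RSS indirection table size, i.e. the
+ * number of entries in rss_ind_table[].
+ */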
+ for (i = 0; i < 128; i++)
+ rss_params->rss_ind_table[i] =
+ ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
+ netdev_rss_key_fill(rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ } else {
+ memset(rss_params, 0, sizeof(*rss_params));
+ }
+ memcpy(&vport_update_params.rss_params, rss_params,
+ sizeof(*rss_params));
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char *mac, int num_macs)
+{
+ struct qed_filter_params filter_cmd;
+ int i;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_MCAST;
+ filter_cmd.filter.mcast.type = opcode;
+ filter_cmd.filter.mcast.num = num_macs;
+
+ for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+ ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+ QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "Starting qede unload\n");
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_CLOSED;
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+
+ edev->ops->fastpath_stop(edev->cdev);
+
+ /* Release the interrupts */
+ qede_sync_free_irqs(edev);
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+
+ qede_napi_disable_remove(edev);
+
+ qede_free_mem_load(edev);
+ qede_free_fp_array(edev);
+
+out:
+ mutex_unlock(&edev->qede_lock);
+ DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+ QEDE_LOAD_NORMAL,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+ struct qed_link_params link_params;
+ struct qed_link_output link_output;
+ int rc;
+
+ DP_INFO(edev, "Starting qede load\n");
+
+ rc = qede_set_num_queues(edev);
+ if (rc)
+ goto err0;
+
+ rc = qede_alloc_fp_array(edev);
+ if (rc)
+ goto err0;
+
+ qede_init_fp(edev);
+
+ rc = qede_alloc_mem_load(edev);
+ if (rc)
+ goto err1;
+ DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+ QEDE_RSS_CNT(edev), edev->num_tc);
+
+ rc = qede_set_real_num_queues(edev);
+ if (rc)
+ goto err2;
+
+ qede_napi_add_enable(edev);
+ DP_INFO(edev, "Napi added and enabled\n");
+
+ rc = qede_setup_irqs(edev);
+ if (rc)
+ goto err3;
+ DP_INFO(edev, "Setup IRQs succeeded\n");
+
+ rc = qede_start_queues(edev);
+ if (rc)
+ goto err4;
+ DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+ /* Add primary mac and set Rx filters */
+ ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_OPEN;
+ mutex_unlock(&edev->qede_lock);
+
+ /* Ask for link-up using current configuration */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Query whether link is already-up */
+ memset(&link_output, 0, sizeof(link_output));
+ edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_link_update(edev, &link_output);
+
+ DP_INFO(edev, "Ending successfully qede load\n");
+
+ return 0;
+
+err4:
+ qede_sync_free_irqs(edev);
+ memset(&edev->int_info, 0, sizeof(struct qed_int_info));
+err3:
+ qede_napi_disable_remove(edev);
+err2:
+ qede_free_mem_load(edev);
+err1:
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+ qede_free_fp_array(edev);
+ edev->num_rss = 0;
+err0:
+ return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *, union qede_reload_args *),
+ union qede_reload_args *args)
+{
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+ /* Call function handler to update parameters
+ * needed for function load.
+ */
+ if (func)
+ func(edev, args);
+
+ qede_load(edev, QEDE_LOAD_NORMAL);
+
+ mutex_lock(&edev->qede_lock);
+ qede_config_rx_mode(edev->ndev);
+ mutex_unlock(&edev->qede_lock);
+}
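+
+/* A minimal sketch of a hypothetical caller, assuming union qede_reload_args
+ * carries an mtu member; a ->ndo_change_mtu() handler could update the MTU
+ * between the unload and load phases like so:
+ *
+ *	static void qede_update_mtu(struct qede_dev *edev,
+ *				    union qede_reload_args *args)
+ *	{
+ *		edev->ndev->mtu = args->mtu;
+ *	}
+ *
+ *	args.mtu = new_mtu;
+ *	qede_reload(edev, qede_update_mtu, &args);
+ */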
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+
+ edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+ return qede_load(edev, QEDE_LOAD_NORMAL);
+}
+
+static int qede_close(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+ return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qede_dev *edev = dev;
+
+ if (!netif_running(edev->ndev)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ return;
+ }
+
+ if (link->link_up) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ } else {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ }
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int rc;
+
+ ASSERT_RTNL(); /* @@@TBD To be removed */
+
+ DP_INFO(edev, "Set_mac_addr called\n");
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ DP_NOTICE(edev, "The MAC address is not valid\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+ if (!netif_running(ndev)) {
+ DP_NOTICE(edev, "The device is currently down\n");
+ return 0;
+ }
+
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ edev->primary_mac);
+ if (rc)
+ return rc;
+
+ /* Add MAC filter according to the new unicast HW MAC address */
+ ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+ return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+ enum qed_filter_rx_mode_type *accept_flags)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *mc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc = 0, mc_count;
+ size_t size;
+
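+ /* 64 appears to be the device's multicast filter limit; it matches
+ * the mc_count checks below.
+ */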
+ size = 64 * ETH_ALEN;
+
+ mc_macs = kzalloc(size, GFP_KERNEL);
+ if (!mc_macs) {
+ DP_NOTICE(edev,
+ "Failed to allocate memory for multicast MACs\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ temp = mc_macs;
+
+ /* Remove all previously configured MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ mc_macs, 1);
+ if (rc)
+ goto exit;
+
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+ if (mc_count < 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Check for all multicast @@@TBD resource allocation */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (mc_count > 64)) {
+ if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+ *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+ } else {
+ /* Add all multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ mc_macs, mc_count);
+ }
+
+exit:
+ kfree(mc_macs);
+ return rc;
+}
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_INFO(edev, "qede_set_rx_mode called\n");
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_INFO(edev,
+ "qede_set_rx_mode called while interface is down\n");
+ } else {
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ }
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+ enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_filter_params rx_mode;
+ unsigned char *uc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc, uc_count;
+ size_t size;
+
+ netif_addr_lock_bh(ndev);
+
+ uc_count = netdev_uc_count(ndev);
+ size = uc_count * ETH_ALEN;
+
+ uc_macs = kzalloc(size, GFP_ATOMIC);
+ if (!uc_macs) {
+ DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+ netif_addr_unlock_bh(ndev);
+ return;
+ }
+
+ temp = uc_macs;
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Configure the struct for the Rx mode */
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+ /* Remove all previous unicast secondary macs and multicast macs
+ * (configure / leave the primary mac)
+ */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+ edev->primary_mac);
+ if (rc)
+ goto out;
+
+ /* Check for promiscuous */
+ if ((ndev->flags & IFF_PROMISC) ||
+ (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ } else {
+ /* Add MAC filters according to the unicast secondary macs */
+ int i;
+
+ temp = uc_macs;
+ for (i = 0; i < uc_count; i++) {
+ rc = qede_set_ucast_rx_mac(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ temp);
+ if (rc)
+ goto out;
+
+ temp += ETH_ALEN;
+ }
+
+ rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+ if (rc)
+ goto out;
+ }
+
+ rx_mode.filter.accept_flags = accept_flags;
+ edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+ kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 974637d..6e11ee6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2062,7 +2062,7 @@
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
napi_hash_add(&channel->napi_str);
- efx_channel_init_lock(channel);
+ efx_channel_busy_poll_init(channel);
}
static void efx_init_napi(struct efx_nic *efx)
@@ -2125,7 +2125,7 @@
if (!netif_running(efx->net_dev))
return LL_FLUSH_FAILED;
- if (!efx_channel_lock_poll(channel))
+ if (!efx_channel_try_lock_poll(channel))
return LL_FLUSH_BUSY;
old_rx_packets = channel->rx_queue.rx_packets;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index ad56231..229e68c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -431,21 +431,8 @@
struct net_device *napi_dev;
struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
- spinlock_t state_lock;
-#define EFX_CHANNEL_STATE_IDLE 0
-#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */
-#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */
-#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */
-#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */
-#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */
-#define EFX_CHANNEL_OWNED \
- (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
-#define EFX_CHANNEL_LOCKED \
- (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
-#define EFX_CHANNEL_USER_PEND \
- (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ unsigned long busy_poll_state;
+#endif
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -480,98 +467,94 @@
};
#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+enum efx_channel_busy_poll_state {
+ EFX_CHANNEL_STATE_IDLE = 0,
+ EFX_CHANNEL_STATE_NAPI = BIT(0),
+ EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+ EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
+ EFX_CHANNEL_STATE_POLL_BIT = 2,
+ EFX_CHANNEL_STATE_POLL = BIT(2),
+ EFX_CHANNEL_STATE_DISABLE_BIT = 3,
+};
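+
+/* Lock-free busy-poll arbitration: NAPI and socket pollers race for the
+ * channel with cmpxchg() on busy_poll_state instead of taking a spinlock.
+ * A NAPI poller that loses the race sets the NAPI_REQ bit so that
+ * busy-pollers cannot starve it indefinitely.
+ */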
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
- spin_lock_init(&channel->state_lock);
+ WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
/* Called from the device poll routine to get ownership of a channel. */
static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
- bool rc = true;
+ unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
- spin_lock_bh(&channel->state_lock);
- if (channel->state & EFX_CHANNEL_LOCKED) {
- WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
- channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
- rc = false;
- } else {
- /* we don't care if someone yielded */
- channel->state = EFX_CHANNEL_STATE_NAPI;
+ while (1) {
+ switch (old) {
+ case EFX_CHANNEL_STATE_POLL:
+ /* Ensure efx_channel_try_lock_poll() won't starve us */
+ set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
+ &channel->busy_poll_state);
+ /* fallthrough */
+ case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
+ return false;
+ default:
+ break;
+ }
+ prev = cmpxchg(&channel->busy_poll_state, old,
+ EFX_CHANNEL_STATE_NAPI);
+ if (unlikely(prev != old)) {
+ /* This is likely to mean we've just entered polling
+ * state. Go back round to set the REQ bit.
+ */
+ old = prev;
+ continue;
+ }
+ return true;
}
- spin_unlock_bh(&channel->state_lock);
- return rc;
}
static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- WARN_ON(channel->state &
- (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
-
- channel->state &= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
+ /* Make sure write has completed from efx_channel_lock_napi() */
+ smp_wmb();
+ WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
/* Called from efx_busy_poll(). */
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if ((channel->state & EFX_CHANNEL_LOCKED)) {
- channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
- rc = false;
- } else {
- /* preserve yield marks */
- channel->state |= EFX_CHANNEL_STATE_POLL;
- }
- spin_unlock_bh(&channel->state_lock);
- return rc;
+ return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+ EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
}
-/* Returns true if NAPI tried to get the channel while it was locked. */
static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-
- /* will reset state to idle, unless channel is disabled */
- channel->state &= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
+ clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}
-/* True if a socket is polling, even if it did not get the lock. */
static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
- WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
- return channel->state & EFX_CHANNEL_USER_PEND;
+ return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}
static inline void efx_channel_enable(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- channel->state = EFX_CHANNEL_STATE_IDLE;
- spin_unlock_bh(&channel->state_lock);
+ clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+ &channel->busy_poll_state);
}
-/* False if the channel is currently owned. */
+/* Stop further polling or napi access.
+ * Returns false if the channel is currently busy polling.
+ */
static inline bool efx_channel_disable(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if (channel->state & EFX_CHANNEL_OWNED)
- rc = false;
- channel->state |= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
-
- return rc;
+ set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+ /* Implicit barrier in efx_channel_busy_polling() */
+ return !efx_channel_busy_polling(channel);
}
#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
}
@@ -584,7 +567,7 @@
{
}
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
return false;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3b75adf..040fbc1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
@@ -366,6 +367,7 @@
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
+ struct device_node *phy_node;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
struct device *dev;
@@ -1146,7 +1148,11 @@
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
- slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+ if (priv->phy_node)
+ slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ else
+ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
if (IS_ERR(slave->phy)) {
dev_err(priv->dev, "phy %s not found on slave %d\n",
@@ -1934,11 +1940,12 @@
slave->port_vlan = data->dual_emac_res_vlan;
}
-static int cpsw_probe_dt(struct cpsw_platform_data *data,
+static int cpsw_probe_dt(struct cpsw_priv *priv,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
+ struct cpsw_platform_data *data = &priv->data;
int i = 0, ret;
u32 prop;
@@ -2029,6 +2036,7 @@
if (strcmp(slave_node->name, "slave"))
continue;
+ priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
@@ -2044,7 +2052,6 @@
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
-
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2245,7 +2252,7 @@
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&priv->data, pdev)) {
+ if (cpsw_probe_dt(priv, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
@@ -2583,17 +2590,7 @@
.remove = cpsw_remove,
};
-static int __init cpsw_init(void)
-{
- return platform_driver_register(&cpsw_driver);
-}
-late_initcall(cpsw_init);
-
-static void __exit cpsw_exit(void)
-{
- platform_driver_unregister(&cpsw_driver);
-}
-module_exit(cpsw_exit);
+module_platform_driver(cpsw_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cde29f8..de5c30c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -46,16 +46,27 @@
static int geneve_net_id;
+union geneve_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
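+
+/* A tagged union: sa.sa_family selects whether the IPv4 or IPv6 member
+ * is the active one, so a single "remote" field covers both families.
+ */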
+
+static union geneve_addr geneve_remote_unspec = { .sa.sa_family = AF_UNSPEC, };
+
/* Pseudo network device */
struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
- struct geneve_sock *sock; /* socket used for geneve tunnel */
+ struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */
+#endif
u8 vni[3]; /* virtual network ID for tunnel */
u8 ttl; /* TTL override */
u8 tos; /* TOS override */
- struct sockaddr_in remote; /* IPv4 address for link partner */
+ union geneve_addr remote; /* IP address for link partner */
struct list_head next; /* geneve's per namespace list */
__be16 dst_port;
bool collect_md;
@@ -103,12 +114,32 @@
vni_list_head = &gs->vni_list[hash];
hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
- addr == geneve->remote.sin_addr.s_addr)
+ addr == geneve->remote.sin.sin_addr.s_addr)
return geneve;
}
return NULL;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
+ struct in6_addr addr6, u8 vni[])
+{
+ struct hlist_head *vni_list_head;
+ struct geneve_dev *geneve;
+ __u32 hash;
+
+ /* Find the device for this VNI */
+ hash = geneve_net_vni_hash(vni);
+ vni_list_head = &gs->vni_list[hash];
+ hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
+ if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+ ipv6_addr_equal(&addr6, &geneve->remote.sin6.sin6_addr))
+ return geneve;
+ }
+ return NULL;
+}
+#endif
+
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
return (struct genevehdr *)(udp_hdr(skb) + 1);
@@ -121,24 +152,49 @@
struct metadata_dst *tun_dst = NULL;
struct geneve_dev *geneve = NULL;
struct pcpu_sw_netstats *stats;
- struct iphdr *iph;
- u8 *vni;
+ struct iphdr *iph = NULL;
__be32 addr;
- int err;
+ static u8 zero_vni[3];
+ u8 *vni;
+ int err = 0;
+ sa_family_t sa_family;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h = NULL;
+ struct in6_addr addr6;
+ static struct in6_addr zero_addr6;
+#endif
- iph = ip_hdr(skb); /* outer IP header... */
+ sa_family = gs->sock->sk->sk_family;
- if (gs->collect_md) {
- static u8 zero_vni[3];
+ if (sa_family == AF_INET) {
+ iph = ip_hdr(skb); /* outer IP header... */
- vni = zero_vni;
- addr = 0;
- } else {
- vni = gnvh->vni;
- addr = iph->saddr;
+ if (gs->collect_md) {
+ vni = zero_vni;
+ addr = 0;
+ } else {
+ vni = gnvh->vni;
+
+ addr = iph->saddr;
+ }
+
+ geneve = geneve_lookup(gs, addr, vni);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (sa_family == AF_INET6) {
+ ip6h = ipv6_hdr(skb); /* outer IPv6 header... */
+
+ if (gs->collect_md) {
+ vni = zero_vni;
+ addr6 = zero_addr6;
+ } else {
+ vni = gnvh->vni;
+
+ addr6 = ip6h->saddr;
+ }
+
+ geneve = geneve6_lookup(gs, addr6, vni);
+#endif
}
-
- geneve = geneve_lookup(gs, addr, vni);
if (!geneve)
goto drop;
@@ -149,7 +205,7 @@
(gnvh->oam ? TUNNEL_OAM : 0) |
(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
- tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
+ tun_dst = udp_tun_rx_dst(skb, sa_family, flags,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst)
@@ -179,12 +235,25 @@
skb_reset_network_header(skb);
- err = IP_ECN_decapsulate(iph, skb);
+ if (iph)
+ err = IP_ECN_decapsulate(iph, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ip6h)
+ err = IP6_ECN_decapsulate(ip6h, skb);
+#endif
if (unlikely(err)) {
- if (log_ecn_error)
- net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
- &iph->saddr, iph->tos);
+ if (log_ecn_error) {
+ if (iph)
+ net_info_ratelimited("non-ECT from %pI4 "
+ "with TOS=%#x\n",
+ &iph->saddr, iph->tos);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ip6h)
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &ip6h->saddr);
+#endif
+ }
if (err > 1) {
++geneve->dev->stats.rx_frame_errors;
++geneve->dev->stats.rx_errors;
@@ -284,6 +353,7 @@
if (ipv6) {
udp_conf.family = AF_INET6;
+ udp_conf.ipv6_v6only = 1;
} else {
udp_conf.family = AF_INET;
udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
@@ -458,9 +528,9 @@
udp_del_offload(&gs->udp_offloads);
}
-static void geneve_sock_release(struct geneve_sock *gs)
+static void __geneve_sock_release(struct geneve_sock *gs)
{
- if (--gs->refcnt)
+ if (!gs || --gs->refcnt)
return;
list_del(&gs->list);
@@ -469,66 +539,117 @@
kfree_rcu(gs, rcu);
}
+static void geneve_sock_release(struct geneve_dev *geneve)
+{
+ __geneve_sock_release(geneve->sock4);
+#if IS_ENABLED(CONFIG_IPV6)
+ __geneve_sock_release(geneve->sock6);
+#endif
+}
+
static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
+ sa_family_t family,
__be16 dst_port)
{
struct geneve_sock *gs;
list_for_each_entry(gs, &gn->sock_list, list) {
if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
- inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
+ inet_sk(gs->sock->sk)->sk.sk_family == family) {
return gs;
}
}
return NULL;
}
-static int geneve_open(struct net_device *dev)
+static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
- struct geneve_dev *geneve = netdev_priv(dev);
struct net *net = geneve->net;
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
__u32 hash;
- gs = geneve_find_sock(gn, geneve->dst_port);
+ gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->dst_port);
if (gs) {
gs->refcnt++;
goto out;
}
- gs = geneve_socket_create(net, geneve->dst_port, false);
+ gs = geneve_socket_create(net, geneve->dst_port, ipv6);
if (IS_ERR(gs))
return PTR_ERR(gs);
out:
gs->collect_md = geneve->collect_md;
- geneve->sock = gs;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ geneve->sock6 = gs;
+ else
+#endif
+ geneve->sock4 = gs;
hash = geneve_net_vni_hash(geneve->vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
return 0;
}
+static int geneve_open(struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ bool ipv6 = geneve->remote.sa.sa_family == AF_INET6;
+ bool metadata = geneve->collect_md;
+ int ret = 0;
+
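+ /* collect_md (metadata) tunnels must be able to receive over either
+ * address family, so both sockets are opened; a configured tunnel
+ * only opens the socket matching its remote's family.
+ */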
+ geneve->sock4 = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ geneve->sock6 = NULL;
+ if (ipv6 || metadata)
+ ret = geneve_sock_add(geneve, true);
+#endif
+ if (!ret && (!ipv6 || metadata))
+ ret = geneve_sock_add(geneve, false);
+ if (ret < 0)
+ geneve_sock_release(geneve);
+
+ return ret;
+}
+
static int geneve_stop(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs = geneve->sock;
if (!hlist_unhashed(&geneve->hlist))
hlist_del_rcu(&geneve->hlist);
- geneve_sock_release(gs);
+ geneve_sock_release(geneve);
return 0;
}
+static void geneve_build_header(struct genevehdr *geneveh,
+ __be16 tun_flags, u8 vni[3],
+ u8 options_len, u8 *options)
+{
+ geneveh->ver = GENEVE_VER;
+ geneveh->opt_len = options_len / 4;
+ geneveh->oam = !!(tun_flags & TUNNEL_OAM);
+ geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->rsvd1 = 0;
+ memcpy(geneveh->vni, vni, 3);
+ geneveh->proto_type = htons(ETH_P_TEB);
+ geneveh->rsvd2 = 0;
+
+ memcpy(geneveh->options, options, options_len);
+}
+
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
__be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool csum)
+ bool csum, bool xnet)
{
struct genevehdr *gnvh;
int min_headroom;
int err;
+ skb_scrub_packet(skb, xnet);
+
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
err = skb_cow_head(skb, min_headroom);
@@ -544,15 +665,7 @@
}
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
- gnvh->ver = GENEVE_VER;
- gnvh->opt_len = opt_len / 4;
- gnvh->oam = !!(tun_flags & TUNNEL_OAM);
- gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
- gnvh->rsvd1 = 0;
- memcpy(gnvh->vni, vni, 3);
- gnvh->proto_type = htons(ETH_P_TEB);
- gnvh->rsvd2 = 0;
- memcpy(gnvh->options, opt, opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
return 0;
@@ -562,10 +675,47 @@
return err;
}
-static struct rtable *geneve_get_rt(struct sk_buff *skb,
- struct net_device *dev,
- struct flowi4 *fl4,
- struct ip_tunnel_info *info)
+#if IS_ENABLED(CONFIG_IPV6)
+static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool csum, bool xnet)
+{
+ struct genevehdr *gnvh;
+ int min_headroom;
+ int err;
+
+ skb_scrub_packet(skb, xnet);
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto free_dst;
+ }
+
+ skb = udp_tunnel_handle_offloads(skb, csum);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto free_dst;
+ }
+
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+ skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ return 0;
+
+free_dst:
+ dst_release(dst);
+ return err;
+}
+#endif
+
+static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi4 *fl4,
+ struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
struct rtable *rt = NULL;
@@ -588,24 +738,67 @@
}
fl4->flowi4_tos = RT_TOS(tos);
- fl4->daddr = geneve->remote.sin_addr.s_addr;
+ fl4->daddr = geneve->remote.sin.sin_addr.s_addr;
}
rt = ip_route_output_key(geneve->net, fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
- dev->stats.tx_carrier_errors++;
- return rt;
+ return ERR_PTR(-ENETUNREACH);
}
if (rt->dst.dev == dev) { /* is this necessary? */
netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
- dev->stats.collisions++;
ip_rt_put(rt);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ELOOP);
}
return rt;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi6 *fl6,
+ struct ip_tunnel_info *info)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs6 = geneve->sock6;
+ struct dst_entry *dst = NULL;
+ __u8 prio;
+
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_mark = skb->mark;
+ fl6->flowi6_proto = IPPROTO_UDP;
+
+ if (info) {
+ fl6->daddr = info->key.u.ipv6.dst;
+ fl6->saddr = info->key.u.ipv6.src;
+ fl6->flowi6_tos = RT_TOS(info->key.tos);
+ } else {
+ prio = geneve->tos;
+ if (prio == 1) {
+ const struct iphdr *iip = ip_hdr(skb);
+
+ prio = ip_tunnel_get_dsfield(iip, skb);
+ }
+
+ fl6->flowi6_tos = RT_TOS(prio);
+ fl6->daddr = geneve->remote.sin6.sin6_addr;
+ }
+
+ if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
+ netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+ if (dst->dev == dev) { /* is this necessary? */
+ netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
+ dst_release(dst);
+ return ERR_PTR(-ELOOP);
+ }
+
+ return dst;
+}
+#endif
+
/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
@@ -620,23 +813,23 @@
#endif
}
-static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs = geneve->sock;
- struct ip_tunnel_info *info = NULL;
+ struct geneve_sock *gs4 = geneve->sock4;
struct rtable *rt = NULL;
const struct iphdr *iip; /* interior IP header */
+ int err = -EINVAL;
struct flowi4 fl4;
__u8 tos, ttl;
__be16 sport;
bool udp_csum;
__be16 df;
- int err;
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
if (geneve->collect_md) {
- info = skb_tunnel_info(skb);
- if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
+ if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
goto tx_error;
}
@@ -644,10 +837,9 @@
goto tx_error;
}
- rt = geneve_get_rt(skb, dev, &fl4, info);
+ rt = geneve_get_v4_rt(skb, dev, &fl4, info);
if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
- dev->stats.tx_carrier_errors++;
+ err = PTR_ERR(rt);
goto tx_error;
}
@@ -667,7 +859,7 @@
udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
err = geneve_build_skb(rt, skb, key->tun_flags, vni,
- info->options_len, opts, udp_csum);
+ info->options_len, opts, udp_csum, xnet);
if (unlikely(err))
goto err;
@@ -677,7 +869,7 @@
} else {
udp_csum = false;
err = geneve_build_skb(rt, skb, 0, geneve->vni,
- 0, NULL, udp_csum);
+ 0, NULL, udp_csum, xnet);
if (unlikely(err))
goto err;
@@ -688,7 +880,7 @@
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
df = 0;
}
- err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
+ err = udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
tos, ttl, df, sport, geneve->dst_port,
!net_eq(geneve->net, dev_net(geneve->dev)),
!udp_csum);
@@ -699,10 +891,152 @@
tx_error:
dev_kfree_skb(skb);
err:
- dev->stats.tx_errors++;
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
+ else
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct ip_tunnel_info *info)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs6 = geneve->sock6;
+ struct dst_entry *dst = NULL;
+ const struct iphdr *iip; /* interior IP header */
+ int err = -EINVAL;
+ struct flowi6 fl6;
+ __u8 prio, ttl;
+ __be16 sport;
+ bool udp_csum;
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+
+ if (geneve->collect_md) {
+ if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
+ netdev_dbg(dev, "no tunnel metadata\n");
+ goto tx_error;
+ }
+ }
+
+ dst = geneve_get_v6_dst(skb, dev, &fl6, info);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto tx_error;
+ }
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ skb_reset_mac_header(skb);
+
+ iip = ip_hdr(skb);
+
+ if (info) {
+ const struct ip_tunnel_key *key = &info->key;
+ u8 *opts = NULL;
+ u8 vni[3];
+
+ tunnel_id_to_vni(key->tun_id, vni);
+ if (key->tun_flags & TUNNEL_GENEVE_OPT)
+ opts = ip_tunnel_info_opts(info);
+
+ udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+ err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
+ info->options_len, opts,
+ udp_csum, xnet);
+ if (unlikely(err))
+ goto err;
+
+ prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
+ ttl = key->ttl;
+ } else {
+ udp_csum = false;
+ err = geneve6_build_skb(dst, skb, 0, geneve->vni,
+ 0, NULL, udp_csum, xnet);
+ if (unlikely(err))
+ goto err;
+
+ prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb);
+ ttl = geneve->ttl;
+ if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
+ ttl = 1;
+ ttl = ttl ? : ip6_dst_hoplimit(dst);
+ }
+ err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
+ &fl6.saddr, &fl6.daddr, prio, ttl,
+ sport, geneve->dst_port, !udp_csum);
+
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ return NETDEV_TX_OK;
+
+tx_error:
+ dev_kfree_skb(skb);
+err:
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
+ else
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+}
+#endif
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct ip_tunnel_info *info = NULL;
+
+ if (geneve->collect_md)
+ info = skb_tunnel_info(skb);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
+ (!info && geneve->remote.sa.sa_family == AF_INET6))
+ return geneve6_xmit_skb(skb, dev, info);
+#endif
+ return geneve_xmit_skb(skb, dev, info);
+}
+
+static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct rtable *rt;
+ struct flowi4 fl4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+#endif
+
+ if (ip_tunnel_info_af(info) == AF_INET) {
+ rt = geneve_get_v4_rt(skb, dev, &fl4, info);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = fl4.saddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (ip_tunnel_info_af(info) == AF_INET6) {
+ dst = geneve_get_v6_dst(skb, dev, &fl6, info);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ dst_release(dst);
+ info->key.u.ipv6.src = fl6.saddr;
+#endif
+ } else {
+ return -EINVAL;
+ }
+
+ info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+ 1, USHRT_MAX, true);
+ info->key.tp_dst = geneve->dst_port;
+ return 0;
+}
+
static const struct net_device_ops geneve_netdev_ops = {
.ndo_init = geneve_init,
.ndo_uninit = geneve_uninit,
@@ -713,6 +1047,7 @@
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
};
static void geneve_get_drvinfo(struct net_device *dev,
@@ -759,6 +1094,7 @@
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_ID] = { .type = NLA_U32 },
[IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) },
[IFLA_GENEVE_TTL] = { .type = NLA_U8 },
[IFLA_GENEVE_TOS] = { .type = NLA_U8 },
[IFLA_GENEVE_PORT] = { .type = NLA_U16 },
@@ -790,7 +1126,7 @@
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
__be16 dst_port,
- __be32 rem_addr,
+ union geneve_addr *remote,
u8 vni[],
bool *tun_on_same_port,
bool *tun_collect_md)
@@ -806,7 +1142,7 @@
*tun_on_same_port = true;
}
if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
- rem_addr == geneve->remote.sin_addr.s_addr &&
+ !memcmp(remote, &geneve->remote, sizeof(geneve->remote)) &&
dst_port == geneve->dst_port)
t = geneve;
}
@@ -814,18 +1150,20 @@
}
static int geneve_configure(struct net *net, struct net_device *dev,
- __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
- __be16 dst_port, bool metadata)
+ union geneve_addr *remote,
+ __u32 vni, __u8 ttl, __u8 tos, __be16 dst_port,
+ bool metadata)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
bool tun_collect_md, tun_on_same_port;
int err;
- if (metadata) {
- if (rem_addr || vni || tos || ttl)
- return -EINVAL;
- }
+ if (!remote)
+ return -EINVAL;
+ if (metadata &&
+ (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl))
+ return -EINVAL;
geneve->net = net;
geneve->dev = dev;
@@ -834,16 +1172,19 @@
geneve->vni[1] = (vni & 0x0000ff00) >> 8;
geneve->vni[2] = vni & 0x000000ff;
- geneve->remote.sin_addr.s_addr = rem_addr;
- if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
+ if ((remote->sa.sa_family == AF_INET &&
+ IN_MULTICAST(ntohl(remote->sin.sin_addr.s_addr))) ||
+ (remote->sa.sa_family == AF_INET6 &&
+ ipv6_addr_is_multicast(&remote->sin6.sin6_addr)))
return -EINVAL;
+ geneve->remote = *remote;
geneve->ttl = ttl;
geneve->tos = tos;
geneve->dst_port = dst_port;
geneve->collect_md = metadata;
- t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
+ t = geneve_find_dev(gn, dst_port, remote, geneve->vni,
&tun_on_same_port, &tun_collect_md);
if (t)
return -EBUSY;
@@ -870,15 +1211,36 @@
__be16 dst_port = htons(GENEVE_UDP_PORT);
__u8 ttl = 0, tos = 0;
bool metadata = false;
- __be32 rem_addr = 0;
+ union geneve_addr remote = geneve_remote_unspec;
__u32 vni = 0;
+ if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
+ return -EINVAL;
+
+ if (data[IFLA_GENEVE_REMOTE]) {
+ remote.sa.sa_family = AF_INET;
+ remote.sin.sin_addr.s_addr =
+ nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+ }
+
+ if (data[IFLA_GENEVE_REMOTE6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ remote.sa.sa_family = AF_INET6;
+ remote.sin6.sin6_addr =
+ nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);
+
+ if (ipv6_addr_type(&remote.sin6.sin6_addr) &
+ IPV6_ADDR_LINKLOCAL) {
+ netdev_dbg(dev, "link-local remote is unsupported\n");
+ return -EINVAL;
+ }
+ }
+
if (data[IFLA_GENEVE_ID])
vni = nla_get_u32(data[IFLA_GENEVE_ID]);
- if (data[IFLA_GENEVE_REMOTE])
- rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
-
if (data[IFLA_GENEVE_TTL])
ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
@@ -891,8 +1253,8 @@
if (data[IFLA_GENEVE_COLLECT_METADATA])
metadata = true;
- return geneve_configure(net, dev, rem_addr, vni,
- ttl, tos, dst_port, metadata);
+ return geneve_configure(net, dev, &remote, vni, ttl, tos, dst_port,
+ metadata);
}
static void geneve_dellink(struct net_device *dev, struct list_head *head)
@@ -906,7 +1268,7 @@
static size_t geneve_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */
- nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
@@ -923,9 +1285,17 @@
if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
goto nla_put_failure;
- if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
- geneve->remote.sin_addr.s_addr))
- goto nla_put_failure;
+ if (geneve->remote.sa.sa_family == AF_INET) {
+ if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
+ geneve->remote.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
+ &geneve->remote.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
@@ -971,7 +1341,8 @@
if (IS_ERR(dev))
return dev;
- err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
+ err = geneve_configure(net, dev, &geneve_remote_unspec,
+ 0, 0, 0, htons(dst_port), true);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
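A minimal sketch, not part of the patch: the dual-family multicast rejection in geneve_configure() can be read as a predicate over union geneve_addr. The helper below is hypothetical and assumes geneve.c's existing includes (linux/in.h for IN_MULTICAST, net/ipv6.h for ipv6_addr_is_multicast).

static bool geneve_remote_is_multicast(const union geneve_addr *a)
{
	if (a->sa.sa_family == AF_INET)
		return IN_MULTICAST(ntohl(a->sin.sin_addr.s_addr));
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&a->sin6.sin6_addr);
	return false;	/* AF_UNSPEC: no remote configured */
}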
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 24f8dbc..d50887e 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -348,7 +348,7 @@
struct rtable *rt;
int err, ret = NET_XMIT_DROP;
struct flowi4 fl4 = {
- .flowi4_oif = dev_get_iflink(dev),
+ .flowi4_oif = dev->ifindex,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.daddr = ip4h->daddr,
@@ -386,7 +386,7 @@
struct dst_entry *dst;
int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
- .flowi6_iif = skb->dev->ifindex,
+ .flowi6_iif = dev->ifindex,
.daddr = ip6h->daddr,
.saddr = ip6h->saddr,
.flowi6_flags = FLOWI_FLAG_ANYSRC,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 248478c..197c939 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -137,7 +137,7 @@
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a7fb665..60994a8 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -141,6 +141,11 @@
---help---
Supports the KSZ9021, VSC8201, KS8001 PHYs.
+config DP83848_PHY
+ tristate "Driver for Texas Instruments DP83848 PHY"
+ ---help---
+ Supports the DP83848 PHY.
+
config DP83867_PHY
tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 7655d47..f31a4e2 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -26,6 +26,7 @@
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
+obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
new file mode 100644
index 0000000..5ce9bef
--- /dev/null
+++ b/drivers/net/phy/dp83848.c
@@ -0,0 +1,99 @@
+/*
+ * Driver for the Texas Instruments DP83848 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83848_PHY_ID 0x20005c90
+
+/* Registers */
+#define DP83848_MICR 0x11
+#define DP83848_MISR 0x12
+
+/* MICR Register Fields */
+#define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */
+#define DP83848_MICR_INTEN BIT(1) /* Interrupt Enable */
+
+/* MISR Register Fields */
+#define DP83848_MISR_RHF_INT_EN BIT(0) /* Receive Error Counter */
+#define DP83848_MISR_FHF_INT_EN BIT(1) /* False Carrier Counter */
+#define DP83848_MISR_ANC_INT_EN BIT(2) /* Auto-negotiation complete */
+#define DP83848_MISR_DUP_INT_EN BIT(3) /* Duplex Status */
+#define DP83848_MISR_SPD_INT_EN BIT(4) /* Speed status */
+#define DP83848_MISR_LINK_INT_EN BIT(5) /* Link status */
+#define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */
+#define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */
+
+static int dp83848_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, DP83848_MISR);
+
+ return err < 0 ? err : 0;
+}
+
+static int dp83848_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_write(phydev, DP83848_MICR,
+ DP83848_MICR_INT_OE |
+ DP83848_MICR_INTEN);
+ if (err < 0)
+ return err;
+
+ return phy_write(phydev, DP83848_MISR,
+ DP83848_MISR_ANC_INT_EN |
+ DP83848_MISR_DUP_INT_EN |
+ DP83848_MISR_SPD_INT_EN |
+ DP83848_MISR_LINK_INT_EN);
+ }
+
+ return phy_write(phydev, DP83848_MICR, 0x0);
+}
+
+static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ { DP83848_PHY_ID, 0xfffffff0 },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+
+static struct phy_driver dp83848_driver[] = {
+ {
+ .phy_id = DP83848_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83848",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+
+ .soft_reset = genphy_soft_reset,
+ .config_init = genphy_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+
+ /* IRQ related */
+ .ack_interrupt = dp83848_ack_interrupt,
+ .config_intr = dp83848_config_intr,
+
+ .driver = { .owner = THIS_MODULE, },
+ },
+};
+module_phy_driver(dp83848_driver);
+
+MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 3bc9f03..95f51d7 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/mdio-gpio.h>
+#include <linux/platform_data/mdio-gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 12f44c5..88cb459 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -372,6 +372,33 @@
EXPORT_SYMBOL(mdiobus_scan);
/**
+ * mdiobus_read_nested - Nested version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * In case of nested MDIO bus access, avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
+{
+ int retval;
+
+ BUG_ON(in_interrupt());
+
+ mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ retval = bus->read(bus, addr, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(mdiobus_read_nested);
+
+/**
* mdiobus_read - Convenience function for reading a given MII mgmt register
* @bus: the mii_bus struct
* @addr: the phy address
@@ -396,6 +423,34 @@
EXPORT_SYMBOL(mdiobus_read);
/**
+ * mdiobus_write_nested - Nested version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * In case of nested MDIO bus access, avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+ int err;
+
+ BUG_ON(in_interrupt());
+
+ mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ err = bus->write(bus, addr, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mdiobus_write_nested);
+
+/**
* mdiobus_write - Convenience function for writing a given MII mgmt register
* @bus: the mii_bus struct
* @addr: the phy address
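A minimal sketch of the intended caller, assuming a hypothetical MDIO bus multiplexer whose child-bus accessors forward to a parent bus: taking the parent's mdio_lock with SINGLE_DEPTH_NESTING keeps lockdep from flagging the parent/child lock pair as recursive.

static int mux_child_mdio_read(struct mii_bus *child, int addr, u32 regnum)
{
	struct mii_bus *parent = child->priv;	/* parent bus stashed at allocation */

	/* child->mdio_lock is already held by the generic mdiobus code;
	 * the _nested variant takes parent->mdio_lock one level deeper.
	 */
	return mdiobus_read_nested(parent, addr, regnum);
}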
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 499185e..cf6312f 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -514,6 +514,27 @@
return 0;
}
+static int ksz9031_read_status(struct phy_device *phydev)
+{
+ int err;
+ int regval;
+
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
+
+ /* Make sure the PHY is not broken. Read idle error count,
+ * and reset the PHY if it is maxed out.
+ */
+ regval = phy_read(phydev, MII_STAT1000);
+ if ((regval & 0xFF) == 0xFF) {
+ phy_init_hw(phydev);
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
static int ksz8873mll_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -772,7 +793,7 @@
.driver_data = &ksz9021_type,
.config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
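The ksz9031_read_status() check above rests on standard 1000BASE-T register semantics: the low byte of MII_STAT1000 (register 0x0a) is the idle error counter, so 0xFF means it has saturated, which this PHY can exhibit when the link is wedged. A sketch of the same check with phy_read() failure handled explicitly (an addition over the patch, which ignores a negative return):

	regval = phy_read(phydev, MII_STAT1000);
	if (regval >= 0 && (regval & 0xff) == 0xff) {
		phy_init_hw(phydev);	/* soft-reset and reprogram the PHY */
		phydev->link = 0;	/* let the phylib state machine renegotiate */
	}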
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 70b0895..dc2da87 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,16 +43,25 @@
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int __maybe_unused len;
+ struct device *dev __maybe_unused = &phydev->dev;
+ struct device_node *of_node __maybe_unused = dev->of_node;
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ int enable_energy = 1;
if (rc < 0)
return rc;
- /* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
- rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
+ if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
+ enable_energy = 0;
+
+ if (enable_energy) {
+ /* Enable energy detect mode for these SMSC transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+ }
return smsc_phy_ack_interrupt(phydev);
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ed7506..5e0b432 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -589,7 +589,7 @@
po = pppox_sk(sk);
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4752e69..75ae756 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -711,6 +711,10 @@
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cf262cc..6369a57 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2360,6 +2360,46 @@
return 0;
}
+static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
+ struct ip_tunnel_info *info,
+ __be16 sport, __be16 dport)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_tos = RT_TOS(info->key.tos);
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ fl4.daddr = info->key.u.ipv4.dst;
+
+ rt = ip_route_output_key(vxlan->net, &fl4);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+ ip_rt_put(rt);
+
+ info->key.u.ipv4.src = fl4.saddr;
+ info->key.tp_src = sport;
+ info->key.tp_dst = dport;
+ return 0;
+}
+
+static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ __be16 sport, dport;
+
+ sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+ vxlan->cfg.port_max, true);
+ dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
+
+ if (ip_tunnel_info_af(info) == AF_INET)
+ return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+ return -EINVAL;
+}
+
static const struct net_device_ops vxlan_netdev_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
@@ -2374,6 +2414,7 @@
.ndo_fdb_add = vxlan_fdb_add,
.ndo_fdb_del = vxlan_fdb_delete,
.ndo_fdb_dump = vxlan_fdb_dump,
+ .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
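A minimal sketch of the consumer side, assuming a flow layer that pre-resolves egress tunnel keys (openvswitch is the expected caller; the wrapper name here is hypothetical):

static int tunnel_query_egress(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_fill_metadata_dst)
		return -EOPNOTSUPP;

	/* Fills the source address and UDP ports into the skb's
	 * ip_tunnel_info in place; the route itself is released.
	 */
	return ops->ndo_fill_metadata_dst(dev, skb);
}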
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index a63ab2e..f9f9422 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -214,8 +214,6 @@
If you choose to build a module, it'll be called rndis_wlan.
-source "drivers/net/wireless/rtl818x/Kconfig"
-
config ADM8211
tristate "ADMtek ADM8211 support"
depends on MAC80211 && PCI
@@ -243,6 +241,8 @@
Thanks to Infineon-ADMtek for their support of this driver.
+source "drivers/net/wireless/realtek/rtl818x/Kconfig"
+
config MAC80211_HWSIM
tristate "Simulated radio testing tool for mac80211"
depends on MAC80211
@@ -278,7 +278,8 @@
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
source "drivers/net/wireless/mediatek/Kconfig"
-source "drivers/net/wireless/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 6b9e729..740fdd3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -22,9 +22,7 @@
obj-$(CONFIG_B43) += b43/
obj-$(CONFIG_B43LEGACY) += b43legacy/
obj-$(CONFIG_ZD1211RW) += zd1211rw/
-obj-$(CONFIG_RTL8180) += rtl818x/
-obj-$(CONFIG_RTL8187) += rtl818x/
-obj-$(CONFIG_RTLWIFI) += rtlwifi/
+obj-$(CONFIG_WLAN) += realtek/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index df7c761..7d3231a 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -82,6 +82,16 @@
#define BMI_NVRAM_SEG_NAME_SZ 16
+#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
+
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK 0x18000
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15
+
+#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
union {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index cf28fbe..84220c3 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -413,7 +413,7 @@
lockdep_assert_held(&ar_pci->ce_lock);
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
- return -EIO;
+ return -ENOSPC;
desc->addr = __cpu_to_le32(paddr);
desc->nbytes = 0;
@@ -1076,9 +1076,7 @@
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr,
- void (*send_cb)(struct ath10k_ce_pipe *),
- void (*recv_cb)(struct ath10k_ce_pipe *))
+ const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
@@ -1104,10 +1102,10 @@
ce_state->src_sz_max = attr->src_sz_max;
if (attr->src_nentries)
- ce_state->send_cb = send_cb;
+ ce_state->send_cb = attr->send_cb;
if (attr->dest_nentries)
- ce_state->recv_cb = recv_cb;
+ ce_state->recv_cb = attr->recv_cb;
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 5c903e15..dbb94fd 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -209,9 +209,7 @@
const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr,
- void (*send_cb)(struct ath10k_ce_pipe *),
- void (*recv_cb)(struct ath10k_ce_pipe *));
+ const struct ce_attr *attr);
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
@@ -277,6 +275,9 @@
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
+
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
};
#define SR_BA_ADDRESS 0x0000
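With send_cb/recv_cb folded into struct ce_attr, a pipe's completion handlers now sit next to its ring geometry instead of being passed to ath10k_ce_alloc_pipe(). A minimal sketch, with illustrative ring sizes and a hypothetical handler name:

static const struct ce_attr ce_attr_example = {
	.flags = 0,
	.src_nentries = 16,		/* host->target send ring */
	.src_sz_max = 2048,
	.dest_nentries = 0,		/* no target->host ring on this pipe */
	.send_cb = example_htc_tx_cb,	/* hypothetical completion handler */
};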
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 879625a..13de361 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -448,6 +448,56 @@
return ret;
}
+static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
+{
+ u32 result, address;
+ u8 board_id, chip_id;
+ int ret;
+
+ address = ar->hw_params.patch_load_addr;
+
+ if (!ar->otp_data || !ar->otp_len) {
+ ath10k_warn(ar,
+ "failed to retrieve board id because of invalid otp\n");
+ return -ENODATA;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot upload otp to 0x%x len %zd for board id\n",
+ address, ar->otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp for board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
+ &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp for board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
+ chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
+ result, board_id, chip_id);
+
+ if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+ return -EOPNOTSUPP;
+
+ ar->id.bmi_ids_valid = true;
+ ar->id.bmi_board_id = board_id;
+ ar->id.bmi_chip_id = chip_id;
+
+ return 0;
+}
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
@@ -486,8 +536,8 @@
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
- ar->fw_features))
- && result != 0) {
+ ar->fw_features)) &&
+ result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -510,7 +560,7 @@
data_len = ar->firmware_len;
mode_name = "normal";
ret = ath10k_swap_code_seg_configure(ar,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
if (ret) {
ath10k_err(ar, "failed to configure fw code swap: %d\n",
ret);
@@ -541,11 +591,18 @@
return ret;
}
-static void ath10k_core_free_firmware_files(struct ath10k *ar)
+static void ath10k_core_free_board_files(struct ath10k *ar)
{
if (!IS_ERR(ar->board))
release_firmware(ar->board);
+ ar->board = NULL;
+ ar->board_data = NULL;
+ ar->board_len = 0;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
if (!IS_ERR(ar->otp))
release_firmware(ar->otp);
@@ -557,10 +614,6 @@
ath10k_swap_code_seg_release(ar);
- ar->board = NULL;
- ar->board_data = NULL;
- ar->board_len = 0;
-
ar->otp = NULL;
ar->otp_data = NULL;
ar->otp_len = 0;
@@ -570,7 +623,6 @@
ar->firmware_len = 0;
ar->cal_file = NULL;
-
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -592,25 +644,7 @@
return 0;
}
-static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
-{
- char filename[100];
-
- scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
- ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
-
- ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
-
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
- ar->spec_board_loaded = true;
-
- return 0;
-}
-
-static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
{
if (!ar->hw_params.fw.board) {
ath10k_err(ar, "failed to find board file fw entry\n");
@@ -625,35 +659,236 @@
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
- ar->spec_board_loaded = false;
+
+ return 0;
+}
+
+static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
+ const void *buf, size_t buf_len,
+ const char *boardname)
+{
+ const struct ath10k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH10K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath10k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH10K_BD_IE_BOARD_NAME:
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH10K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found board data for '%s'",
+ boardname);
+
+ ar->board_data = board_ie_data;
+ ar->board_len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
+ const char *boardname,
+ const char *filename)
+{
+ size_t len, magic_len, ie_len;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ int ret, ie_id;
+
+ ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
+
+ data = ar->board->data;
+ len = ar->board->size;
+
+ /* magic has extra null byte padded */
+ magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n",
+ ar->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) {
+ ath10k_err(ar, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n",
+ ar->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_BD_IE_BOARD:
+ ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+ boardname);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+
+ /* board data found */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!ar->board_data || !ar->board_len) {
+ ath10k_err(ar,
+ "failed to fetch board data for %s from %s/%s\n",
+ boardname, ar->hw_params.fw.dir, filename);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath10k_core_free_board_files(ar);
+ return ret;
+}
+
+static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
+ size_t name_len)
+{
+ if (ar->id.bmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.bmi_chip_id,
+ ar->id.bmi_board_id);
+ goto out;
+ }
+
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.vendor, ar->id.device,
+ ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+out:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
static int ath10k_core_fetch_board_file(struct ath10k *ar)
{
+ char boardname[100];
int ret;
- if (strlen(ar->spec_board_id) > 0) {
- ret = ath10k_core_fetch_spec_board_file(ar);
- if (ret) {
- ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
- ret);
- goto generic;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
- ar->spec_board_id);
- return 0;
- }
-
-generic:
- ret = ath10k_core_fetch_generic_board_file(ar);
+ ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname));
if (ret) {
- ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+ ath10k_err(ar, "failed to create board name: %d", ret);
return ret;
}
+ ar->bd_api = 2;
+ ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+ ATH10K_BOARD_API2_FILE);
+ if (!ret)
+ goto success;
+
+ ar->bd_api = 1;
+ ret = ath10k_core_fetch_board_data_api_1(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board data\n");
+ return ret;
+ }
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
return 0;
}
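For reference, the lookup keys ath10k_core_create_board_name() produces look like the following (values illustrative); the API 2 container file is scanned for a matching ATH10K_BD_IE_BOARD_NAME before falling back to the flat API 1 board file:

/* with OTP-provided BMI ids:
 *	"bus=pci,bmi-chip-id=0,bmi-board-id=1"
 * otherwise, from the PCI identity:
 *	"bus=pci,vendor=168c,device=0040,subsystem-vendor=0000,subsystem-device=0000"
 */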
@@ -885,12 +1120,6 @@
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
- ret = ath10k_core_fetch_board_file(ar);
- if (ret) {
- ath10k_err(ar, "failed to fetch board file: %d\n", ret);
- return ret;
- }
-
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -1263,10 +1492,10 @@
goto err;
/* Some of the qca988x solutions are having global reset issue
- * during target initialization. Bypassing PLL setting before
- * downloading firmware and letting the SoC run on REF_CLK is
- * fixing the problem. Corresponding firmware change is also needed
- * to set the clock source once the target is initialized.
+ * during target initialization. Bypassing PLL setting before
+ * downloading firmware and letting the SoC run on REF_CLK is
+ * fixing the problem. Corresponding firmware change is also needed
+ * to set the clock source once the target is initialized.
*/
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
ar->fw_features)) {
@@ -1500,6 +1729,19 @@
goto err_power_down;
}
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ goto err_free_firmware_files;
+ }
+
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -1627,6 +1869,7 @@
ath10k_testmode_destroy(ar);
ath10k_core_free_firmware_files(ar);
+ ath10k_core_free_board_files(ar);
ath10k_debug_unregister(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 04e040a..7cc7cdd 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -250,6 +250,30 @@
struct list_head peers;
};
+#define ATH10K_TPC_TABLE_TYPE_FLAG 1
+#define ATH10K_TPC_PREAM_TABLE_END 0xFFFF
+
+struct ath10k_tpc_table {
+ u32 pream_idx[WMI_TPC_RATE_MAX];
+ u8 rate_code[WMI_TPC_RATE_MAX];
+ char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats {
+ u32 reg_domain;
+ u32 chan_freq;
+ u32 phy_mode;
+ u32 twice_antenna_reduction;
+ u32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ u32 power_limit;
+ u32 num_tx_chain;
+ u32 ctl;
+ u32 rate_max;
+ u8 flag[WMI_TPC_FLAG];
+ struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG];
+};
+
struct ath10k_dfs_stats {
u32 phy_errors;
u32 pulses_total;
@@ -378,6 +402,11 @@
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
+ /* used for tpc-dump storage, protected by data-lock */
+ struct ath10k_tpc_stats *tpc_stats;
+
+ struct completion tpc_complete;
+
/* protected by conf_mutex */
u32 fw_dbglog_mask;
u32 fw_dbglog_level;
@@ -647,10 +676,19 @@
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
} swap;
- char spec_board_id[100];
- bool spec_board_loaded;
+ struct {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+
+ bool bmi_ids_valid;
+ u8 bmi_board_id;
+ u8 bmi_chip_id;
+ } id;
int fw_api;
+ int bd_api;
enum ath10k_cal_mode cal_mode;
struct {
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index bf033f4..6cc1aa3 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -125,19 +125,25 @@
void ath10k_print_driver_info(struct ath10k *ar)
{
char fw_features[128] = {};
+ char boardinfo[100];
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
- ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
+ if (ar->id.bmi_ids_valid)
+ scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d",
+ ar->id.bmi_chip_id, ar->id.bmi_board_id);
+ else
+ scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x",
+ ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+ ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
- (strlen(ar->spec_board_id) > 0 ? ", " : ""),
- ar->spec_board_id,
- (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
- ? " fallback" : ""),
+ boardinfo,
ar->hw->wiphy->fw_version,
ar->fw_api,
+ ar->bd_api,
ar->htt.target_version_major,
ar->htt.target_version_minor,
ar->wmi.op_version,
@@ -285,28 +291,6 @@
spin_unlock_bh(&ar->data_lock);
}
-static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
-{
- struct ath10k_fw_stats_peer *i;
- size_t num = 0;
-
- list_for_each_entry(i, head, list)
- ++num;
-
- return num;
-}
-
-static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
-{
- struct ath10k_fw_stats_vdev *i;
- size_t num = 0;
-
- list_for_each_entry(i, head, list)
- ++num;
-
- return num;
-}
-
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_fw_stats stats = {};
@@ -343,8 +327,8 @@
goto free;
}
- num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
- num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
+ num_peers = ath10k_wmi_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
!list_empty(&stats.pdevs));
is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -429,240 +413,6 @@
return 0;
}
-/* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
-
-static void ath10k_fw_stats_fill(struct ath10k *ar,
- struct ath10k_fw_stats *fw_stats,
- char *buf)
-{
- unsigned int len = 0;
- unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
- const struct ath10k_fw_stats_pdev *pdev;
- const struct ath10k_fw_stats_vdev *vdev;
- const struct ath10k_fw_stats_peer *peer;
- size_t num_peers;
- size_t num_vdevs;
- int i;
-
- spin_lock_bh(&ar->data_lock);
-
- pdev = list_first_entry_or_null(&fw_stats->pdevs,
- struct ath10k_fw_stats_pdev, list);
- if (!pdev) {
- ath10k_warn(ar, "failed to get pdev stats\n");
- goto unlock;
- }
-
- num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
- num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
-
- len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s\n",
- "ath10k PDEV stats");
- len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
- "=================");
-
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Channel noise floor", pdev->ch_noise_floor);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Channel TX power", pdev->chan_tx_power);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "TX frame count", pdev->tx_frame_count);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RX frame count", pdev->rx_frame_count);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RX clear count", pdev->rx_clear_count);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Cycle count", pdev->cycle_count);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "PHY error count", pdev->phy_err_count);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RTS bad count", pdev->rts_bad);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RTS good count", pdev->rts_good);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "FCS bad count", pdev->fcs_bad);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "No beacon count", pdev->no_beacons);
- len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "MIB int count", pdev->mib_int_count);
-
- len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s\n",
- "ath10k PDEV TX stats");
- len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
- "=================");
-
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HTT cookies queued", pdev->comp_queued);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HTT cookies disp.", pdev->comp_delivered);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDU queued", pdev->msdu_enqued);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU queued", pdev->mpdu_enqued);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs dropped", pdev->wmm_drop);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Local enqued", pdev->local_enqued);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Local freed", pdev->local_freed);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HW queued", pdev->hw_queued);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PPDUs reaped", pdev->hw_reaped);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Num underruns", pdev->underrun);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PPDUs cleaned", pdev->tx_abort);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", pdev->mpdus_requed);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Excessive retries", pdev->tx_ko);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HW rate", pdev->data_rc);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Sched self tiggers", pdev->self_triggers);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Dropped due to SW retries",
- pdev->sw_retry_failure);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Illegal rate phy errors",
- pdev->illgl_rate_phy_err);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Pdev continous xretry", pdev->pdev_cont_xretry);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "TX timeout", pdev->pdev_tx_timeout);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PDEV resets", pdev->pdev_resets);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY underrun", pdev->phy_underrun);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU is more than txop limit", pdev->txop_ovf);
-
- len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s\n",
- "ath10k PDEV RX stats");
- len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
- "=================");
-
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Mid PPDU route change",
- pdev->mid_ppdu_route_change);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Tot. number of statuses", pdev->status_rcvd);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 0", pdev->r0_frags);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 1", pdev->r1_frags);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 2", pdev->r2_frags);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 3", pdev->r3_frags);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs delivered to HTT", pdev->htt_msdus);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs delivered to HTT", pdev->htt_mpdus);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs delivered to stack", pdev->loc_msdus);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs delivered to stack", pdev->loc_mpdus);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Oversized AMSUs", pdev->oversize_amsdu);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY errors", pdev->phy_errs);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY errors drops", pdev->phy_err_drop);
- len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
-
- len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
- "ath10k VDEV stats", num_vdevs);
- len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
- "=================");
-
- list_for_each_entry(vdev, &fw_stats->vdevs, list) {
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "vdev id", vdev->vdev_id);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "beacon snr", vdev->beacon_snr);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "data snr", vdev->data_snr);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "num rx frames", vdev->num_rx_frames);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "num rts fail", vdev->num_rts_fail);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "num rts success", vdev->num_rts_success);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "num rx err", vdev->num_rx_err);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "num rx discard", vdev->num_rx_discard);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "num tx not acked", vdev->num_tx_not_acked);
-
- for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
- len += scnprintf(buf + len, buf_len - len,
- "%25s [%02d] %u\n",
- "num tx frames", i,
- vdev->num_tx_frames[i]);
-
- for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
- len += scnprintf(buf + len, buf_len - len,
- "%25s [%02d] %u\n",
- "num tx frames retries", i,
- vdev->num_tx_frames_retries[i]);
-
- for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
- len += scnprintf(buf + len, buf_len - len,
- "%25s [%02d] %u\n",
- "num tx frames failures", i,
- vdev->num_tx_frames_failures[i]);
-
- for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
- len += scnprintf(buf + len, buf_len - len,
- "%25s [%02d] 0x%08x\n",
- "tx rate history", i,
- vdev->tx_rate_history[i]);
-
- for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
- len += scnprintf(buf + len, buf_len - len,
- "%25s [%02d] %u\n",
- "beacon rssi history", i,
- vdev->beacon_rssi_history[i]);
-
- len += scnprintf(buf + len, buf_len - len, "\n");
- }
-
- len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
- "ath10k PEER stats", num_peers);
- len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
- "=================");
-
- list_for_each_entry(peer, &fw_stats->peers, list) {
- len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
- "Peer MAC address", peer->peer_macaddr);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer RSSI", peer->peer_rssi);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer TX rate", peer->peer_tx_rate);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer RX rate", peer->peer_rx_rate);
- len += scnprintf(buf + len, buf_len - len, "\n");
- }
-
-unlock:
- spin_unlock_bh(&ar->data_lock);
-
- if (len >= buf_len)
- buf[len - 1] = 0;
- else
- buf[len] = 0;
-}
-
static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
{
struct ath10k *ar = inode->i_private;
@@ -688,7 +438,12 @@
goto err_free;
}
- ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ if (ret) {
+ ath10k_warn(ar, "failed to fill fw stats: %d\n", ret);
+ goto err_free;
+ }
+
file->private_data = buf;
mutex_unlock(&ar->conf_mutex);
@@ -1843,6 +1598,233 @@
.llseek = default_llseek,
};
+#define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024)
+
+static int ath10k_debug_tpc_stats_request(struct ath10k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc config: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+ 1 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats)
+{
+ spin_lock_bh(&ar->data_lock);
+
+ kfree(ar->debug.tpc_stats);
+ ar->debug.tpc_stats = tpc_stats;
+ complete(&ar->debug.tpc_complete);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+ unsigned int j, char *buf, unsigned int *len)
+{
+ unsigned int i, buf_len;
+ static const char table_str[][5] = { "CDD",
+ "STBC",
+ "TXBF" };
+ static const char pream_str[][6] = { "CCK",
+ "OFDM",
+ "HT20",
+ "HT40",
+ "VHT20",
+ "VHT40",
+ "VHT80",
+ "HTCUP" };
+
+ buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "********************************\n");
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "******************* %s POWER TABLE ****************\n",
+ table_str[j]);
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "********************************\n");
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "No. Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n");
+
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "%8d %s 0x%2x %s\n", i,
+ pream_str[tpc_stats->tpc_table[j].pream_idx[i]],
+ tpc_stats->tpc_table[j].rate_code[i],
+ tpc_stats->tpc_table[j].tpc_value[i]);
+ }
+
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "***********************************\n");
+}
+
+static void ath10k_tpc_stats_fill(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats,
+ char *buf)
+{
+ unsigned int len, j, buf_len;
+
+ len = 0;
+ buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!tpc_stats) {
+ ath10k_warn(ar, "failed to get tpc stats\n");
+ goto unlock;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "*************************************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "TPC config for channel %4d mode %d\n",
+ tpc_stats->chan_freq,
+ tpc_stats->phy_mode);
+ len += scnprintf(buf + len, buf_len - len,
+ "*************************************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "CTL = 0x%2x Reg. Domain = %2d\n",
+ tpc_stats->ctl,
+ tpc_stats->reg_domain);
+ len += scnprintf(buf + len, buf_len - len,
+ "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n",
+ tpc_stats->twice_antenna_gain,
+ tpc_stats->twice_antenna_reduction);
+ len += scnprintf(buf + len, buf_len - len,
+ "Power Limit = %2d Reg. Max Power = %2d\n",
+ tpc_stats->power_limit,
+ tpc_stats->twice_max_rd_power / 2);
+ len += scnprintf(buf + len, buf_len - len,
+ "Num tx chains = %2d Num supported rates = %2d\n",
+ tpc_stats->num_tx_chain,
+ tpc_stats->rate_max);
+
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ switch (j) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "CDD not supported\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "STBC not supported\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "TXBF not supported\n***************************\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ default:
+ len += scnprintf(buf + len, buf_len - len,
+ "Invalid Type\n");
+ break;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static int ath10k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_tpc_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc config stats: %d\n",
+ ret);
+ goto err_free;
+ }
+
+ ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tpc_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ unsigned int len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats = {
+ .open = ath10k_tpc_stats_open,
+ .release = ath10k_tpc_stats_release,
+ .read = ath10k_tpc_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
@@ -2111,6 +2093,8 @@
ar->debug.fw_crash_data = NULL;
ath10k_debug_fw_stats_reset(ar);
+
+ kfree(ar->debug.tpc_stats);
}
int ath10k_debug_register(struct ath10k *ar)
@@ -2127,6 +2111,7 @@
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
+ init_completion(&ar->debug.tpc_complete);
init_completion(&ar->debug.fw_stats_complete);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -2195,6 +2180,9 @@
debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_quiet_period);
+ debugfs_create_file("tpc_stats", S_IRUSR,
+ ar->debug.debugfs_phy, ar, &fops_tpc_stats);
+
return 0;
}
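A minimal userspace sketch for exercising the new tpc_stats file (the path assumes debugfs mounted at /sys/kernel/debug and the first wiphy):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/ath10k/tpc_stats", "r");

	if (!f) {
		perror("tpc_stats");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}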
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 53bd6a1..7de780c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -55,6 +55,9 @@
ATH10K_DBG_AGGR_MODE_MAX,
};
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
extern unsigned int ath10k_debug_mask;
__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -70,6 +73,8 @@
int ath10k_debug_register(struct ath10k *ar);
void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
@@ -117,6 +122,12 @@
{
}
+static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats)
+{
+ kfree(tpc_stats);
+}
+
static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
int len)
{
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 0c92e02..89e7076 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -30,13 +30,6 @@
u16 len;
};
-struct ath10k_hif_cb {
- int (*tx_completion)(struct ath10k *ar,
- struct sk_buff *wbuf);
- int (*rx_completion)(struct ath10k *ar,
- struct sk_buff *wbuf);
-};
-
struct ath10k_hif_ops {
/* send a scatter-gather list to the target */
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
@@ -65,8 +58,7 @@
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe,
- int *ul_is_polled, int *dl_is_polled);
+ u8 *ul_pipe, u8 *dl_pipe);
void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
@@ -80,9 +72,6 @@
*/
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
- void (*set_callbacks)(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks);
-
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
u32 (*read32)(struct ath10k *ar, u32 address);
@@ -142,13 +131,10 @@
static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe,
- int *ul_is_polled,
- int *dl_is_polled)
+ u8 *ul_pipe, u8 *dl_pipe)
{
return ar->hif.ops->map_service_to_pipe(ar, service_id,
- ul_pipe, dl_pipe,
- ul_is_polled, dl_is_polled);
+ ul_pipe, dl_pipe);
}
static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
@@ -163,12 +149,6 @@
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
-static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks)
-{
- ar->hif.ops->set_callbacks(ar, callbacks);
-}
-
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
u8 pipe_id)
{
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 32d9ff1..5b3c6bc 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -23,16 +23,6 @@
/* Send */
/********/
-static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
- int force)
-{
- /*
- * Check whether HIF has any prior sends that have finished,
- * have not had the post-processing done.
- */
- ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
-}
-
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -181,24 +171,22 @@
return ret;
}
-static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htc *htc = &ar->htc;
struct ath10k_skb_cb *skb_cb;
struct ath10k_htc_ep *ep;
if (WARN_ON_ONCE(!skb))
- return 0;
+ return;
skb_cb = ATH10K_SKB_CB(skb);
ep = &htc->endpoint[skb_cb->eid];
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
-
- return 0;
}
+EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
/***********/
/* Receive */
@@ -304,8 +292,7 @@
return status;
}
-static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
int status = 0;
struct ath10k_htc *htc = &ar->htc;
@@ -326,21 +313,11 @@
ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
hdr, sizeof(*hdr));
- status = -EINVAL;
goto out;
}
ep = &htc->endpoint[eid];
- /*
- * If this endpoint that received a message from the target has
- * a to-target HIF pipe whose send completions are polled rather
- * than interrupt-driven, this is a good point to ask HIF to check
- * whether it has any completed sends to handle.
- */
- if (ep->ul_is_polled)
- ath10k_htc_send_complete_check(ep, 1);
-
payload_len = __le16_to_cpu(hdr->len);
if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
@@ -348,7 +325,6 @@
payload_len + sizeof(*hdr));
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
hdr, sizeof(*hdr));
- status = -EINVAL;
goto out;
}
@@ -358,7 +334,6 @@
skb->len, payload_len);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
"", hdr, sizeof(*hdr));
- status = -EINVAL;
goto out;
}
@@ -374,7 +349,6 @@
(trailer_len > payload_len)) {
ath10k_warn(ar, "Invalid trailer length: %d\n",
trailer_len);
- status = -EPROTO;
goto out;
}
@@ -407,7 +381,6 @@
* sending unsolicited messages on the ep 0
*/
ath10k_warn(ar, "HTC rx ctrl still processing\n");
- status = -EINVAL;
complete(&htc->ctl_resp);
goto out;
}
@@ -439,9 +412,8 @@
skb = NULL;
out:
kfree_skb(skb);
-
- return status;
}
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
@@ -767,9 +739,7 @@
status = ath10k_hif_map_service_to_pipe(htc->ar,
ep->service_id,
&ep->ul_pipe_id,
- &ep->dl_pipe_id,
- &ep->ul_is_polled,
- &ep->dl_is_polled);
+ &ep->dl_pipe_id);
if (status)
return status;
@@ -778,10 +748,6 @@
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
- ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot htc ep %d ul polled %d dl polled %d\n",
- ep->eid, ep->ul_is_polled, ep->dl_is_polled);
-
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ar, ATH10K_DBG_BOOT,
@@ -841,7 +807,6 @@
/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
- struct ath10k_hif_cb htc_callbacks;
struct ath10k_htc_ep *ep = NULL;
struct ath10k_htc *htc = &ar->htc;
@@ -849,15 +814,11 @@
ath10k_htc_reset_endpoint_states(htc);
- /* setup HIF layer callbacks */
- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
htc->ar = ar;
/* Get HIF default pipe for HTC message exchange */
ep = &htc->endpoint[ATH10K_HTC_EP_0];
- ath10k_hif_set_callbacks(ar, &htc_callbacks);
ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
init_completion(&htc->ctl_resp);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 527179c..e70aa38 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -312,8 +312,6 @@
int max_ep_message_len;
u8 ul_pipe_id;
u8 dl_pipe_id;
- int ul_is_polled; /* call HIF to get tx completions */
- int dl_is_polled; /* call HIF to fetch rx (not implemented) */
u8 seq_no; /* for debugging */
int tx_credits;
@@ -355,5 +353,7 @@
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
#endif
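With the ath10k_hif_cb registration gone, the transport layer calls the exported HTC handlers directly from its completion paths. A minimal sketch of the HIF side (function name hypothetical):

static void example_hif_rx_done(struct ath10k *ar, struct sk_buff *skb)
{
	/* previously dispatched through the registered ath10k_hif_cb;
	 * now a direct call into HTC
	 */
	ath10k_htc_rx_completion_handler(ar, skb);
}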
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 5a8e4ea..2bad50e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1488,7 +1488,6 @@
int num_pending_mgmt_tx;
struct idr pending_tx;
wait_queue_head_t empty_tx_wq;
- struct dma_pool *tx_pool;
/* set if host-fw communication goes haywire
* used to avoid further failures */
@@ -1509,6 +1508,11 @@
dma_addr_t paddr;
struct htt_msdu_ext_desc *vaddr;
} frag_desc;
+
+ struct {
+ dma_addr_t paddr;
+ struct ath10k_htt_txbuf *vaddr;
+ } txbuf;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1587,6 +1591,7 @@
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 606c1a3..6060dda 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2125,6 +2125,7 @@
/* Free the indication buffer */
dev_kfree_skb_any(skb);
}
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index eb5ba9b..1682397 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -108,9 +108,12 @@
spin_lock_init(&htt->tx_lock);
idr_init(&htt->pending_tx);
- htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
- sizeof(struct ath10k_htt_txbuf), 4, 0);
- if (!htt->tx_pool) {
+ size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+ htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_DMA);
+ if (!htt->txbuf.vaddr) {
+ ath10k_err(ar, "failed to alloc tx buffer\n");
ret = -ENOMEM;
goto free_idr_pending_tx;
}
@@ -125,14 +128,17 @@
if (!htt->frag_desc.vaddr) {
ath10k_warn(ar, "failed to alloc fragment desc memory\n");
ret = -ENOMEM;
- goto free_tx_pool;
+ goto free_txbuf;
}
skip_frag_desc_alloc:
return 0;
-free_tx_pool:
- dma_pool_destroy(htt->tx_pool);
+free_txbuf:
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf);
+ dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+ htt->txbuf.paddr);
free_idr_pending_tx:
idr_destroy(&htt->pending_tx);
return ret;
@@ -160,7 +166,13 @@
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
- dma_pool_destroy(htt->tx_pool);
+
+ if (htt->txbuf.vaddr) {
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf);
+ dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+ htt->txbuf.paddr);
+ }
if (htt->frag_desc.vaddr) {
size = htt->max_num_pending_tx *
@@ -175,6 +187,12 @@
dev_kfree_skb_any(skb);
}
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
+
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
@@ -454,9 +472,9 @@
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
- if (res < 0) {
+ if (res < 0)
goto err_tx_dec;
- }
+
msdu_id = res;
txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -521,7 +539,6 @@
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
- dma_addr_t paddr = 0;
u32 frags_paddr = 0;
struct htt_msdu_ext_desc *ext_desc = NULL;
bool limit_mgmt_desc = false;
@@ -542,21 +559,17 @@
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
- if (res < 0) {
+ if (res < 0)
goto err_tx_dec;
- }
+
msdu_id = res;
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
- &paddr);
- if (!skb_cb->htt.txbuf) {
- res = -ENOMEM;
- goto err_free_msdu_id;
- }
- skb_cb->htt.txbuf_paddr = paddr;
+ skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
+ skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf) * msdu_id);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
@@ -574,7 +587,7 @@
res = dma_mapping_error(dev, skb_cb->paddr);
if (res) {
res = -EIO;
- goto err_free_txbuf;
+ goto err_free_msdu_id;
}
switch (skb_cb->txmode) {
@@ -706,10 +719,6 @@
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-err_free_txbuf:
- dma_pool_free(htt->tx_pool,
- skb_cb->htt.txbuf,
- skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
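Because every in-flight msdu_id now owns a fixed slot in one coherent allocation, the tx-buffer lookup reduces to pointer arithmetic. A sketch of that lookup as a hypothetical helper (assumes msdu_id stays below max_num_pending_tx, which the idr allocation guarantees):

/* hypothetical helper mirroring the slot lookup above */
static struct ath10k_htt_txbuf *htt_txbuf_slot(struct ath10k_htt *htt,
					       u16 msdu_id,
					       dma_addr_t *paddr)
{
	*paddr = htt->txbuf.paddr + msdu_id * sizeof(struct ath10k_htt_txbuf);
	return &htt->txbuf.vaddr[msdu_id];
}

Unlike the old dma_pool_alloc() call, this lookup cannot fail on the hot path, which is why the -ENOMEM branch disappears from the tx function below.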
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 80c174f..2d87737 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -97,6 +97,9 @@
/* includes also the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD"
+
+#define ATH10K_BOARD_API2_FILE "board-2.bin"
#define REG_DUMP_COUNT_QCA988X 60
@@ -159,6 +162,16 @@
ATH10K_FW_HTT_OP_VERSION_MAX,
};
+enum ath10k_bd_ie_type {
+ /* contains sub IEs of enum ath10k_bd_ie_board_type */
+ ATH10K_BD_IE_BOARD = 0,
+};
+
+enum ath10k_bd_ie_board_type {
+ ATH10K_BD_IE_BOARD_NAME = 0,
+ ATH10K_BD_IE_BOARD_DATA = 1,
+};
+
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 79490ad..484c1a1 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -197,9 +197,8 @@
return -EOPNOTSUPP;
}
- if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- }
if (cmd == DISABLE_KEY) {
arg.key_cipher = WMI_CIPHER_NONE;
@@ -1111,7 +1110,8 @@
ret = ath10k_monitor_stop(ar);
if (ret)
- ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+ ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
+ ret);
/* not serious */
}
@@ -2084,7 +2084,8 @@
enum ieee80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
- int i, n, max_nss;
+ int i, n;
+ u8 max_nss;
u32 stbc;
lockdep_assert_held(&ar->conf_mutex);
@@ -2169,7 +2170,7 @@
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = max_nss;
+ arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -4065,6 +4066,7 @@
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
{
int nsts = ar->vht_cap_info;
+
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
@@ -4081,8 +4083,9 @@
static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
{
int sound_dim = ar->vht_cap_info;
+
sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
- sound_dim >>=IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
/* If the sounding dimension is not advertised by the firmware,
* let's use a default value of 1
@@ -4656,7 +4659,7 @@
info->use_cts_prot ? 1 : 0);
if (ret)
ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
- info->use_cts_prot, arvif->vdev_id, ret);
+ info->use_cts_prot, arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -6268,8 +6271,8 @@
rcu_read_lock();
if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
ieee80211_iter_chan_contexts_atomic(ar->hw,
- ath10k_mac_get_any_chandef_iter,
- &def);
+ ath10k_mac_get_any_chandef_iter,
+ &def);
if (vifs)
def = &vifs[0].new_ctx->def;
@@ -7301,7 +7304,7 @@
ath10k_reg_notifier);
if (ret) {
ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
- goto err_free;
+ goto err_dfs_detector_exit;
}
ar->hw->wiphy->cipher_suites = cipher_suites;
@@ -7310,7 +7313,7 @@
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
- goto err_free;
+ goto err_dfs_detector_exit;
}
if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
@@ -7324,10 +7327,16 @@
err_unregister:
ieee80211_unregister_hw(ar->hw);
+
+err_dfs_detector_exit:
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
err_free:
kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 110fcad6..5c05b0c 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -104,6 +104,10 @@
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -112,6 +116,7 @@
.src_nentries = 16,
.src_sz_max = 256,
.dest_nentries = 0,
+ .send_cb = ath10k_pci_htc_tx_cb,
},
/* CE1: target->host HTT + HTC control */
@@ -120,6 +125,7 @@
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
+ .recv_cb = ath10k_pci_htc_rx_cb,
},
/* CE2: target->host WMI */
@@ -128,6 +134,7 @@
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 128,
+ .recv_cb = ath10k_pci_htc_rx_cb,
},
/* CE3: host->target WMI */
@@ -136,6 +143,7 @@
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath10k_pci_htc_tx_cb,
},
/* CE4: host->target HTT */
@@ -144,14 +152,16 @@
.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
.src_sz_max = 256,
.dest_nentries = 0,
+ .send_cb = ath10k_pci_htt_tx_cb,
},
- /* CE5: unused */
+ /* CE5: target->host HTT (HIF->HTT) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_pci_htt_rx_cb,
},
/* CE6: target autonomous hif_memcpy */
@@ -257,12 +267,12 @@
/* NB: 50% of src nentries, since tx has 2 frags */
- /* CE5: unused */
+ /* CE5: target->host HTT (HIF->HTT) */
{
.pipenum = __cpu_to_le32(5),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
+ .nbytes_max = __cpu_to_le32(512),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
@@ -396,7 +406,7 @@
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(1),
+ __cpu_to_le32(5),
},
/* (Additions here) */
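With CE5 flipped from an unused host->target pipe to a 512-entry target->host ring, HTT rx no longer shares CE1 with HTC control traffic. An illustrative lookup against the updated map (the function is internal to pci.c; shown only to spell out the result):

u8 ul_pipe, dl_pipe;

ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
				   &ul_pipe, &dl_pipe);
/* ul_pipe == 4 (host->target HTT), dl_pipe == 5 (target->host HTT) */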
@@ -452,8 +462,12 @@
int curr_delay = 5;
while (tot_delay < PCIE_WAKE_TIMEOUT) {
- if (ath10k_pci_is_awake(ar))
+ if (ath10k_pci_is_awake(ar)) {
+ if (tot_delay > PCIE_WAKE_LATE_US)
+ ath10k_warn(ar, "device wakeup took %d ms which is unusally long, otherwise it works normally.\n",
+ tot_delay / 1000);
return 0;
+ }
udelay(curr_delay);
tot_delay += curr_delay;
@@ -465,12 +479,53 @@
return -ETIMEDOUT;
}
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ if (!ar_pci->ps_awake) {
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
static int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
int ret = 0;
+ if (ar_pci->pci_ps == 0)
+ return ret;
+
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
@@ -502,6 +557,9 @@
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
+ if (ar_pci->pci_ps == 0)
+ return;
+
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
@@ -544,6 +602,11 @@
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
+ if (ar_pci->pci_ps == 0) {
+ ath10k_pci_force_sleep(ar);
+ return;
+ }
+
del_timer_sync(&ar_pci->ps_timer);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
@@ -682,8 +745,6 @@
dma_addr_t paddr;
int ret;
- lockdep_assert_held(&ar_pci->ce_lock);
-
skb = dev_alloc_skb(pipe->buf_sz);
if (!skb)
return -ENOMEM;
@@ -701,9 +762,10 @@
ATH10K_SKB_RXCB(skb)->paddr = paddr;
+ spin_lock_bh(&ar_pci->ce_lock);
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+ spin_unlock_bh(&ar_pci->ce_lock);
if (ret) {
- ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -713,25 +775,27 @@
return 0;
}
-static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
struct ath10k *ar = pipe->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
int ret, num;
- lockdep_assert_held(&ar_pci->ce_lock);
-
if (pipe->buf_sz == 0)
return;
if (!ce_pipe->dest_ring)
return;
+ spin_lock_bh(&ar_pci->ce_lock);
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ spin_unlock_bh(&ar_pci->ce_lock);
while (num--) {
ret = __ath10k_pci_rx_post_buf(pipe);
if (ret) {
+ if (ret == -ENOSPC)
+ break;
ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
mod_timer(&ar_pci->rx_post_retry, jiffies +
ATH10K_PCI_RX_POST_RETRY_MS);
@@ -740,25 +804,13 @@
}
}
-static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
-{
- struct ath10k *ar = pipe->hif_ce_state;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- spin_lock_bh(&ar_pci->ce_lock);
- __ath10k_pci_rx_post_pipe(pipe);
- spin_unlock_bh(&ar_pci->ce_lock);
-}
-
static void ath10k_pci_rx_post(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
- spin_lock_bh(&ar_pci->ce_lock);
for (i = 0; i < CE_COUNT; i++)
- __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
- spin_unlock_bh(&ar_pci->ce_lock);
+ ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}
static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
@@ -1102,11 +1154,9 @@
}
/* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff_head list;
struct sk_buff *skb;
u32 ce_data;
@@ -1124,16 +1174,16 @@
}
while ((skb = __skb_dequeue(&list)))
- cb->tx_completion(ar, skb);
+ ath10k_htc_tx_completion_handler(ar, skb);
}
-/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
+static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
- struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff *skb;
struct sk_buff_head list;
void *transfer_context;
@@ -1168,12 +1218,56 @@
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
skb->data, skb->len);
- cb->rx_completion(ar, skb);
+ callback(ar, skb);
}
ath10k_pci_rx_post_pipe(pipe_info);
}
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by lower (CE) layer when a send to HTT Target completes. */
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff *skb;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
+ &nbytes, &transfer_id) == 0) {
+ /* no need to call tx completion for NULL pointers */
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ ath10k_htt_hif_tx_complete(ar, skb);
+ }
+}
+
+static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+/* Called by lower (CE) layer when HTT data is received from the Target. */
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever the CE pipe that transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+ ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+}
+
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items)
{
@@ -1343,17 +1437,6 @@
ath10k_ce_per_engine_service(ar, pipe);
}
-static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
-
- memcpy(&ar_pci->msg_callbacks_current, callbacks,
- sizeof(ar_pci->msg_callbacks_current));
-}
-
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1368,10 +1451,8 @@
del_timer_sync(&ar_pci->rx_post_retry);
}
-static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
- u16 service_id, u8 *ul_pipe,
- u8 *dl_pipe, int *ul_is_polled,
- int *dl_is_polled)
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
@@ -1379,9 +1460,6 @@
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
- /* polling for received messages not supported */
- *dl_is_polled = 0;
-
for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
entry = &target_service_to_ce_map_wlan[i];
@@ -1415,25 +1493,17 @@
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
- *ul_is_polled =
- (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
-
return 0;
}
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
u8 *ul_pipe, u8 *dl_pipe)
{
- int ul_is_polled, dl_is_polled;
-
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
- ul_pipe,
- dl_pipe,
- &ul_is_polled,
- &dl_is_polled);
+ ul_pipe, dl_pipe);
}
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
@@ -1504,6 +1574,7 @@
static int ath10k_pci_hif_start(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_irq_enable(ar);
@@ -1579,7 +1650,7 @@
ce_ring->per_transfer_context[i] = NULL;
- ar_pci->msg_callbacks_current.tx_completion(ar, skb);
+ ath10k_htc_tx_completion_handler(ar, skb);
}
}
@@ -1999,9 +2070,7 @@
pipe->pipe_num = i;
pipe->hif_ce_state = ar;
- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
- ath10k_pci_ce_send_done,
- ath10k_pci_ce_recv_data);
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
@@ -2257,7 +2326,7 @@
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
- ret);
+ ret);
return ret;
}
@@ -2397,6 +2466,15 @@
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
+ int ret = 0;
+
+ if (ar_pci->pci_ps == 0) {
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
+ }
+ }
/* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@@ -2407,7 +2485,7 @@
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- return 0;
+ return ret;
}
#endif
@@ -2421,7 +2499,6 @@
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
.send_complete_check = ath10k_pci_hif_send_complete_check,
- .set_callbacks = ath10k_pci_hif_set_callbacks,
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
.power_up = ath10k_pci_hif_power_up,
.power_down = ath10k_pci_hif_power_down,
@@ -2501,6 +2578,16 @@
{
struct ath10k *ar = arg;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ if (ar_pci->pci_ps == 0) {
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake device up on irq: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+ }
if (ar_pci->num_msi_intrs == 0) {
if (!ath10k_pci_irq_pending(ar))
@@ -2900,17 +2987,21 @@
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
u32 chip_id;
+ bool pci_ps;
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
+ pci_ps = false;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
+ pci_ps = true;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
+ pci_ps = false;
break;
default:
WARN_ON(1);
@@ -2924,19 +3015,21 @@
return -ENOMEM;
}
- ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
ar_pci = ath10k_pci_priv(ar);
ar_pci->pdev = pdev;
ar_pci->dev = &pdev->dev;
ar_pci->ar = ar;
ar->dev_id = pci_dev->device;
+ ar_pci->pci_ps = pci_ps;
- if (pdev->subsystem_vendor || pdev->subsystem_device)
- scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
- "%04x:%04x:%04x:%04x",
- pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device);
+ ar->id.vendor = pdev->vendor;
+ ar->id.device = pdev->device;
+ ar->id.subsystem_vendor = pdev->subsystem_vendor;
+ ar->id.subsystem_device = pdev->subsystem_device;
spin_lock_init(&ar_pci->ce_lock);
spin_lock_init(&ar_pci->ps_lock);
@@ -2962,6 +3055,14 @@
ath10k_pci_ce_deinit(ar);
ath10k_pci_irq_disable(ar);
+ if (ar_pci->pci_ps == 0) {
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake up device : %d\n", ret);
+ goto err_free_pipes;
+ }
+ }
+
ret = ath10k_pci_init_irq(ar);
if (ret) {
ath10k_err(ar, "failed to init irqs: %d\n", ret);
@@ -3090,13 +3191,16 @@
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
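The pci_ps flag set in ath10k_pci_probe() gates two power models: chips with PCI power save (QCA6164/QCA6174) keep the refcounted ath10k_pci_wake()/ath10k_pci_sleep() pair, while QCA988X and QCA99X0 are woken once, up front. A condensed sketch of the gating pattern repeated above (error handling elided):

/* assumes ar_pci->pci_ps was filled in from the device-ID switch */
if (ar_pci->pci_ps == 0) {
	/* one-shot SOC wake for chips without PCI power save */
	ret = ath10k_pci_force_wake(ar);
	if (ret)
		return ret;
}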
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 8d364fb..f91bf33 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -175,8 +175,6 @@
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
- struct ath10k_hif_cb msg_callbacks_current;
-
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
@@ -221,6 +219,12 @@
* powersave register state changes.
*/
bool ps_awake;
+
+ /* pci power save, disabled for QCA988X and QCA99X0.
+ * Leaving this 'false' avoids frequent locking
+ * on MMIO read/write.
+ */
+ bool pci_ps;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -230,7 +234,8 @@
#define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
-#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
+#define PCIE_WAKE_TIMEOUT 30000 /* 30ms */
+#define PCIE_WAKE_LATE_US 10000 /* 10ms */
#define BAR_NUM 0
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 1a899d7..60fe562 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -215,6 +215,6 @@
void ath10k_thermal_unregister(struct ath10k *ar)
{
- thermal_cooling_device_unregister(ar->thermal.cdev);
sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 7db7d50..6d1105a 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -92,11 +92,6 @@
skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->htt.txbuf)
- dma_pool_free(htt->tx_pool,
- skb_cb->htt.txbuf,
- skb_cb->htt.txbuf_paddr);
-
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 248ffc3..b54aa08 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -177,6 +177,11 @@
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan);
struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
+ struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
+ u32 param);
+ void (*fw_stats_fill)(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1270,4 +1275,31 @@
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}
+static inline int
+ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_tpc_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_tpc_config_cmdid);
+}
+
+static inline int
+ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ if (!ar->wmi.ops->fw_stats_fill)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
+ return 0;
+}
#endif
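fw_stats_fill follows the usual wmi-ops dispatch: each WMI ABI plugs its own formatter into the op table and callers stay flavour-agnostic. A hypothetical caller (buffer handling is illustrative; only the size constant comes from the driver):

struct ath10k_fw_stats stats;	/* assumed already populated */
char *buf = vzalloc(ATH10K_FW_STATS_BUF_SIZE);

if (buf) {
	if (ath10k_wmi_fw_stats_fill(ar, &stats, buf) == 0)
		pr_info("%s", buf);	/* formatted, NUL-terminated stats */
	vfree(buf);
}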
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index b5849b3..8f83548 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3468,6 +3468,7 @@
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+ .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
};
/************/
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 87d9de2..6e7d7a7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3018,8 +3018,6 @@
memcpy(skb_put(bcn, arvif->u.ap.noa_len),
arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
-
- return;
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -3507,7 +3505,7 @@
tsf);
if (res < 0) {
ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
- res);
+ res);
return;
}
break;
@@ -3835,9 +3833,258 @@
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}
+static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
+ struct wmi_pdev_tpc_config_event *ev,
+ u32 rate_idx, u32 num_chains,
+ u32 rate_code, u8 type)
+{
+ u8 tpc, num_streams, preamble, ch, stm_idx;
+
+ num_streams = ATH10K_HW_NSS(rate_code);
+ preamble = ATH10K_HW_PREAMBLE(rate_code);
+ ch = num_chains - 1;
+
+ tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
+
+ if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+ goto out;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK)
+ goto out;
+
+ stm_idx = num_streams - 1;
+ if (num_chains <= num_streams)
+ goto out;
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_STBC:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_CDD:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
+ break;
+ default:
+ ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
+ tpc = 0;
+ break;
+ }
+
+out:
+ return tpc;
+}
+
+static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+ struct wmi_pdev_tpc_config_event *ev,
+ struct ath10k_tpc_stats *tpc_stats,
+ u8 *rate_code, u16 *pream_table, u8 type)
+{
+ u32 i, j, pream_idx, flags;
+ u8 tpc[WMI_TPC_TX_N_CHAIN];
+ char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+ char buff[WMI_TPC_BUF_SIZE];
+
+ flags = __le32_to_cpu(ev->flags);
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "invalid table type in wmi tpc event: %d\n", type);
+ return;
+ }
+
+ pream_idx = 0;
+ for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+ for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+ if (j >= __le32_to_cpu(ev->num_tx_chain))
+ break;
+
+ tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type);
+ snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+ strncat(tpc_value, buff, strlen(buff));
+ }
+ tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
+ tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
+ memcpy(tpc_stats->tpc_table[type].tpc_value[i],
+ tpc_value, sizeof(tpc_value));
+ }
+}
+
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+ u32 i, j, pream_idx, num_tx_chain;
+ u8 rate_code[WMI_TPC_RATE_MAX], rate_idx;
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_config_event *ev;
+ struct ath10k_tpc_stats *tpc_stats;
+
+ ev = (struct wmi_pdev_tpc_config_event *)skb->data;
+
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
+ /* Create the rate code table based on the chains supported */
+ rate_idx = 0;
+ pream_idx = 0;
+
+ /* Fill CCK rate code */
+ for (i = 0; i < 4; i++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_idx++;
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill OFDM rate code */
+ for (i = 0; i < 8; i++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_idx++;
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+
+ /* Fill HT20 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 8; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill HT40 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 8; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT20 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT40 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT80 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+
+ pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
+
+ tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+ tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+ tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+ tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+ tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+ tpc_stats->twice_antenna_reduction =
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+ tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_CDD);
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_STBC);
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_TXBF);
+
+ ath10k_debug_tpc_stats_process(ar, tpc_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
+ __le32_to_cpu(ev->chan_freq),
+ __le32_to_cpu(ev->phy_mode),
+ __le32_to_cpu(ev->ctl),
+ __le32_to_cpu(ev->reg_domain),
+ a_sle32_to_cpu(ev->twice_antenna_gain),
+ __le32_to_cpu(ev->twice_antenna_reduction),
+ __le32_to_cpu(ev->power_limit),
+ __le32_to_cpu(ev->twice_max_rd_power) / 2,
+ __le32_to_cpu(ev->num_tx_chain),
+ __le32_to_cpu(ev->rate_max));
}
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
@@ -5090,7 +5337,7 @@
config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
config.bmiss_offload_max_vdev =
__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
@@ -6356,6 +6603,399 @@
return skb;
}
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+ struct wmi_pdev_get_tpc_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
+ cmd->param = __cpu_to_le32(param);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev get tcp config param:%d\n", param);
+ return skb;
+}
+
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static void
+ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", pdev->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", pdev->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", pdev->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", pdev->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", pdev->mib_int_count);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath10k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requed", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Sched self tiggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Pdev continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath10k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address", peer->peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RSSI", peer->peer_rssi);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer TX rate", peer->peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate", peer->peer_rx_rate);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ unsigned int len = 0;
+ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -6414,6 +7054,7 @@
.gen_addba_send = ath10k_wmi_op_gen_addba_send,
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -6479,6 +7120,7 @@
.gen_addba_send = ath10k_wmi_op_gen_addba_send,
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -6545,6 +7187,7 @@
.gen_addba_send = ath10k_wmi_op_gen_addba_send,
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
};
static const struct wmi_ops wmi_10_2_4_ops = {
@@ -6606,6 +7249,8 @@
.gen_addba_send = ath10k_wmi_op_gen_addba_send,
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 3e5a159..6e84d1c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -73,6 +73,25 @@
#define HTC_PROTOCOL_VERSION 0x0002
#define WMI_PROTOCOL_VERSION 0x0002
+/*
+ * There is no signed version of __le32, so as a temporary solution we
+ * define our own version. The idea is from fs/ntfs/types.h.
+ *
+ * Use an a_ prefix so that it doesn't conflict if we later get proper
+ * support in linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32)val);
+}
+
enum wmi_service {
WMI_SERVICE_BEACON_OFFLOAD = 0,
WMI_SERVICE_SCAN_OFFLOAD,
@@ -3642,8 +3661,18 @@
__le32 param;
} __packed;
+#define WMI_TPC_CONFIG_PARAM 1
#define WMI_TPC_RATE_MAX 160
#define WMI_TPC_TX_N_CHAIN 4
+#define WMI_TPC_PREAM_TABLE_MAX 10
+#define WMI_TPC_FLAG 3
+#define WMI_TPC_BUF_SIZE 10
+
+enum wmi_tpc_table_type {
+ WMI_TPC_TABLE_TYPE_CDD = 0,
+ WMI_TPC_TABLE_TYPE_STBC = 1,
+ WMI_TPC_TABLE_TYPE_TXBF = 2,
+};
enum wmi_tpc_config_event_flag {
WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
@@ -3657,7 +3686,7 @@
__le32 phy_mode;
__le32 twice_antenna_reduction;
__le32 twice_max_rd_power;
- s32 twice_antenna_gain;
+ a_sle32 twice_antenna_gain;
__le32 power_limit;
__le32 rate_max;
__le32 num_tx_chain;
@@ -4253,6 +4282,11 @@
WMI_RATE_PREAMBLE_VHT,
};
+#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3))
+#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3)
+#define ATH10K_HW_RATECODE(rate, nss, preamble) \
+ (((preamble) << 6) | ((nss) << 4) | (rate))
+
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@@ -6064,6 +6098,7 @@
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
struct ath10k_fw_stats_peer;
+struct ath10k_fw_stats;
int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
@@ -6145,4 +6180,13 @@
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
int left_len, struct wmi_phyerr_ev_arg *arg);
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
+
#endif /* _WMI_H_ */
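The a_sle32 helpers round-trip signed 32-bit values through little-endian storage, and the rate-code macros pack preamble, NSS and rate index into one byte. A small worked example (the function wrapper is hypothetical):

static void example_wmi_codes(void)
{
	/* signed round trip: a negative antenna gain survives conversion */
	a_sle32 wire = a_cpu_to_sle32(-10);
	s32 host = a_sle32_to_cpu(wire);	/* host == -10 */

	/* pack MCS 7, two spatial streams (nss index 1), HT preamble */
	u8 rc = ATH10K_HW_RATECODE(7, 1, WMI_RATE_PREAMBLE_HT);
	/* rc == (2 << 6) | (1 << 4) | 7 == 0x97, and the extractors
	 * invert it: ATH10K_HW_NSS(rc) == 2,
	 * ATH10K_HW_PREAMBLE(rc) == WMI_RATE_PREAMBLE_HT
	 */

	(void)host;
	(void)rc;
}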
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index a511ef3..76b682e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2217,7 +2217,7 @@
/* enter / leave wow suspend on first vif always */
first_vif = ath6kl_vif_first(ar);
- if (WARN_ON(unlikely(!first_vif)) ||
+ if (WARN_ON(!first_vif) ||
!ath6kl_cfg80211_ready(first_vif))
return -EIO;
@@ -2297,7 +2297,7 @@
int ret;
vif = ath6kl_vif_first(ar);
- if (WARN_ON(unlikely(!vif)) ||
+ if (WARN_ON(!vif) ||
!ath6kl_cfg80211_ready(vif))
return -EIO;
@@ -3312,7 +3312,7 @@
}
/* fw uses seconds, also make sure that it's >0 */
- interval = max_t(u16, 1, request->interval / 1000);
+ interval = max_t(u16, 1, request->scan_plans[0].interval);
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
interval, interval,
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index e481f14..fffb65b 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1085,9 +1085,7 @@
send_pkt->completion = NULL;
ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
status = ath6kl_htc_tx_issue(target, send_pkt);
-
- if (send_pkt != NULL)
- htc_reclaim_txctrl_buf(target, send_pkt);
+ htc_reclaim_txctrl_buf(target, send_pkt);
return status;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 6314ae2..9d17a53 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -610,8 +610,8 @@
#define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ -116
-#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -120
+#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -112
#define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ -127
-#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -110
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -97
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 79fd3b2..8b238c1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -857,7 +857,7 @@
qca956x_1p0_common_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
qca956x_1p0_common_rx_gain_bounds);
- INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
qca956x_1p0_xlna_only);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -942,7 +942,7 @@
ar9462_2p1_baseband_core_mix_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
ar9462_2p1_baseband_postamble_mix_rxgain);
- INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
ar9462_2p1_baseband_postamble_5g_xlna);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -951,7 +951,7 @@
ar9462_2p0_baseband_core_mix_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
ar9462_2p0_baseband_postamble_mix_rxgain);
- INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
ar9462_2p0_baseband_postamble_5g_xlna);
}
}
@@ -961,12 +961,12 @@
if (AR_SREV_9462_21(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_2p1_common_5g_xlna_only_rxgain);
- INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
ar9462_2p1_baseband_postamble_5g_xlna);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_2p0_common_5g_xlna_only_rxgain);
- INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
ar9462_2p0_baseband_postamble_5g_xlna);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1ad66b7..201425e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -926,19 +926,18 @@
*/
if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
(ar9003_hw_get_rx_gain_idx(ah) == 3)) {
- REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
modesIndex, regWrites);
}
-
- if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
- REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
- modesIndex, regWrites);
}
if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
regWrites);
+ if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
+ modesIndex, regWrites);
/*
* TXGAIN initvals.
*/
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e8454db..4f0a3f6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -919,7 +919,7 @@
struct ar5416IniArray iniCckfirJapan2484;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray ini_radio_post_sys2ant;
- struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+ struct ar5416IniArray ini_modes_rxgain_xlna;
struct ar5416IniArray ini_modes_rxgain_bb_core;
struct ar5416IniArray ini_modes_rxgain_bb_postamble;
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 9d68712..2303ef9 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -284,10 +284,10 @@
if (cd == NULL)
return false;
- dpd->last_pulse_ts = event->ts;
/* reset detector on time stamp wraparound, caused by TSF reset */
if (event->ts < dpd->last_pulse_ts)
dpd_reset(dpd);
+ dpd->last_pulse_ts = event->ts;
/* do type individual pattern matching */
for (i = 0; i < dpd->num_radar_types; i++) {
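The hunk above fixes an ordering bug: previously last_pulse_ts was updated before the wraparound test, so the comparison checked event->ts against itself and the detector never reset on a TSF reset. A minimal sketch of the corrected flow (names as in the hunk):

	/* compare against the previous pulse before storing the new one */
	if (event->ts < dpd->last_pulse_ts)	/* TSF jumped backwards */
		dpd_reset(dpd);			/* drop stale detector state */
	dpd->last_pulse_ts = event->ts;		/* only now record this pulse */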
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index ce8c038..6dfedc8 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -1,5 +1,6 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
+ select WANT_DEV_COREDUMP
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 64b4326..fdf63d5 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -17,6 +17,7 @@
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o
+wil6210-y += wil_crash_dump.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index d1a1e16..97bc186 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1373,6 +1373,12 @@
}
}
spin_unlock_bh(&p->tid_rx_lock);
+ seq_printf(s,
+ "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+ p->stats.rx_non_data_frame,
+ p->stats.rx_short_frame,
+ p->stats.rx_large_frame);
+
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
mcs++)
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index a371f036..06fc46f 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -347,7 +347,12 @@
wil6210_mask_irq_misc(wil);
if (isr & ISR_MISC_FW_ERROR) {
- wil_err(wil, "Firmware error detected\n");
+ u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
+ u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+
+ wil_err(wil,
+ "Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
+ fw_assert_code, ucode_assert_code);
clear_bit(wil_status_fwready, wil->status);
/*
* do not clear @isr here - we do 2-nd part in thread
@@ -386,6 +391,7 @@
wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
if (isr & ISR_MISC_FW_ERROR) {
+ wil_fw_core_dump(wil);
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
wil_fw_error_recovery(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2fb04c5..aade16b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -203,11 +203,13 @@
* - disconnect single STA, already disconnected
* - disconnect all
*
- * For "disconnect all", there are 2 options:
+ * For "disconnect all", there are 3 options:
* - bssid == NULL
+ * - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
* - bssid is our MAC address
*/
- if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) {
+ if (bssid && !is_broadcast_ether_addr(bssid) &&
+ !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
cid = wil_find_cid(wil, bssid);
wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
bssid, cid, reason_code);
@@ -765,6 +767,8 @@
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
+ set_bit(wil_status_resetting, wil->status);
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
@@ -851,6 +855,12 @@
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");
+
+ if (test_bit(wil_status_resetting, wil->status)) {
+ wil_info(wil, "Reset already in progress\n");
+ return;
+ }
+
wil->recovery_state = fw_recovery_pending;
schedule_work(&wil->fw_error_worker);
}
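The new wil_status_resetting bit is set at the top of the reset path and tested here so a firmware error arriving mid-reset cannot queue a second recovery. A sketch of the guard pattern (assuming reset and recovery are otherwise serialized; a strictly atomic variant would use test_and_set_bit()):

	if (test_bit(wil_status_resetting, wil->status))
		return;				/* reset already in progress */
	set_bit(wil_status_resetting, wil->status);
	/* ... perform reset, then clear the bit when done ... */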
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index feff1ef..1a3142c3 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -260,6 +260,7 @@
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int wil6210_suspend(struct device *dev, bool is_runtime)
{
@@ -307,7 +308,6 @@
return rc;
}
-#ifdef CONFIG_PM_SLEEP
static int wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 8a8cdc6..5ca0307 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -110,7 +110,7 @@
*/
for (i = 0; i < num_descriptors; i++) {
struct vring_tx_desc *_d = &pmc->pring_va[i];
- struct vring_tx_desc dd, *d = &dd;
+ struct vring_tx_desc dd = {}, *d = &dd;
int j = 0;
pmc->descriptors[i].va = dma_alloc_coherent(dev,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 9238c1a..e3d1be8 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -205,6 +205,32 @@
spin_unlock(&sta->tid_rx_lock);
}
+/* process BAR frame, called in NAPI context */
+void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq)
+{
+ struct wil_sta_info *sta = &wil->sta[cid];
+ struct wil_tid_ampdu_rx *r;
+
+ spin_lock(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ if (!r) {
+ wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
+ goto out;
+ }
+ if (seq_less(seq, r->head_seq_num)) {
+ wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
+ seq, r->head_seq_num);
+ goto out;
+ }
+ wil_dbg_txrx(wil, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n",
+ cid, tid, seq, r->head_seq_num);
+ wil_release_reorder_frames(wil, r, seq);
+
+out:
+ spin_unlock(&sta->tid_rx_lock);
+}
+
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn)
{
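wil_rx_bar() above rejects a BAR whose start sequence precedes the reorder head. Sequence numbers are 12 bits wide and wrap, so "less" is modular arithmetic; the driver's seq_less() is assumed to follow the usual 802.11 shape:

	#define SEQ_MODULO 0x1000
	#define SEQ_MASK   0xfff

	/* true when sq1 precedes sq2 modulo 2^12 */
	static inline int seq_less(u16 sq1, u16 sq2)
	{
		return ((sq2 - sq1) & SEQ_MASK) > (SEQ_MODULO >> 1);
	}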
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6229110..0f8b687 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -358,6 +358,13 @@
}
}
+/* similar to the ieee80211_ version, but FC contains only the first byte */
+static inline int wil_is_back_req(u8 fc)
+{
+ return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
/**
* reap 1 frame from @swhead
*
@@ -379,14 +386,16 @@
u16 dmalen;
u8 ftype;
int cid;
- int i = (int)vring->swhead;
+ int i;
struct wil_net_stats *stats;
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
+again:
if (unlikely(wil_vring_is_empty(vring)))
return NULL;
+ i = (int)vring->swhead;
_d = &vring->va[i].rx;
if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
/* it is not error, we just reached end of Rx done area */
@@ -398,7 +407,7 @@
wil_vring_advance_head(vring, 1);
if (!skb) {
wil_err(wil, "No Rx skb at [%d]\n", i);
- return NULL;
+ goto again;
}
d = wil_skb_rxdesc(skb);
*d = *_d;
@@ -409,13 +418,17 @@
trace_wil6210_rx(i, d);
wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
- wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
+ cid = wil_rxdesc_cid(d);
+ stats = &wil->sta[cid].stats;
+
if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ stats->rx_large_frame++;
kfree_skb(skb);
- return NULL;
+ goto again;
}
skb_trim(skb, dmalen);
@@ -424,8 +437,6 @@
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
- cid = wil_rxdesc_cid(d);
- stats = &wil->sta[cid].stats;
stats->last_mcs_rx = wil_rxdesc_mcs(d);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
@@ -437,24 +448,47 @@
/* no extra checks if in sniffer mode */
if (ndev->type != ARPHRD_ETHER)
return skb;
- /*
- * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
+ /* Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
* The driver should recognize them by the frame type found in the
* Rx descriptor. If the type is not data, it is an 802.11 frame as-is.
*/
ftype = wil_rxdesc_ftype(d) << 2;
if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
- wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
- /* TODO: process it */
+ u8 fc1 = wil_rxdesc_fc1(d);
+ int mid = wil_rxdesc_mid(d);
+ int tid = wil_rxdesc_tid(d);
+ u16 seq = wil_rxdesc_seq(d);
+
+ wil_dbg_txrx(wil,
+ "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ stats->rx_non_data_frame++;
+ if (wil_is_back_req(fc1)) {
+ wil_dbg_txrx(wil,
+ "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+ wil_rx_bar(wil, cid, tid, seq);
+ } else {
+ /* print all the info again; this print alone can be enabled
+ * without the overhead of printing every Rx frame
+ */
+ wil_dbg_txrx(wil,
+ "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+ }
kfree_skb(skb);
- return NULL;
+ goto again;
}
if (unlikely(skb->len < ETH_HLEN + snaplen)) {
wil_err(wil, "Short frame, len = %d\n", skb->len);
- /* TODO: process it (i.e. BAR) */
+ stats->rx_short_frame++;
kfree_skb(skb);
- return NULL;
+ goto again;
}
/* L4 IDENT is on when HW calculated checksum, check status
@@ -1633,7 +1667,7 @@
goto drop;
}
if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
- wil_err(wil, "FW not connected\n");
+ wil_err_ratelimited(wil, "FW not connected\n");
goto drop;
}
if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 82a8f9a..ee7c7b4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -464,6 +464,12 @@
return WIL_GET_BITS(d->mac.d0, 12, 15);
}
+/* first byte (with frame type/subtype) of the FC field */
+static inline u8 wil_rxdesc_fc1(struct vring_rx_desc *d)
+{
+ return (u8)(WIL_GET_BITS(d->mac.d0, 10, 15) << 2);
+}
+
static inline int wil_rxdesc_seq(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d0, 16, 27);
@@ -501,6 +507,7 @@
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq);
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
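A worked example of wil_rxdesc_fc1() above: the descriptor keeps FC bits 2..7 (type and subtype) in d0 bits 10..15, so shifting left by two rebuilds the first FC byte with the protocol-version bits zeroed. For a BlockAckReq that gives:

	/* constants from include/linux/ieee80211.h */
	u8 fc1 = wil_rxdesc_fc1(d);	/* CTL type + BACK_REQ subtype = 0x84 */
	bool is_bar =
		(fc1 & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
		(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); /* 0x04|0x80 */

which is exactly the test wil_is_back_req() performs in txrx.c.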
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index dd4ea92..f619bf2 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -246,6 +246,10 @@
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
+/* crash codes for FW/Ucode stored here */
+#define RGF_FW_ASSERT_CODE (0x91f020)
+#define RGF_UCODE_ASSERT_CODE (0x91f028)
+
enum {
HW_VER_UNKNOWN,
HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
@@ -405,6 +409,7 @@
wil_status_reset_done,
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
+ wil_status_resetting, /* reset in progress */
wil_status_last /* keep last */
};
@@ -465,6 +470,9 @@
unsigned long tx_bytes;
unsigned long tx_errors;
unsigned long rx_dropped;
+ unsigned long rx_non_data_frame;
+ unsigned long rx_short_frame;
+ unsigned long rx_large_frame;
u16 last_mcs_rx;
u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
@@ -820,4 +828,6 @@
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+void wil_fw_core_dump(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
new file mode 100644
index 0000000..7e70934
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include <linux/devcoredump.h>
+
+static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
+ u32 *out_dump_size, u32 *out_host_min)
+{
+ int i;
+ const struct fw_map *map;
+ u32 host_min, host_max, tmp_max;
+
+ if (!out_dump_size)
+ return -EINVAL;
+
+ /* calculate the total size of the unpacked crash dump */
+ BUILD_BUG_ON(ARRAY_SIZE(fw_mapping) == 0);
+ map = &fw_mapping[0];
+ host_min = map->host;
+ host_max = map->host + (map->to - map->from);
+
+ for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
+ map = &fw_mapping[i];
+
+ if (map->host < host_min)
+ host_min = map->host;
+
+ tmp_max = map->host + (map->to - map->from);
+ if (tmp_max > host_max)
+ host_max = tmp_max;
+ }
+
+ *out_dump_size = host_max - host_min;
+ if (out_host_min)
+ *out_host_min = host_min;
+
+ return 0;
+}
+
+static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest,
+ u32 size)
+{
+ int i;
+ const struct fw_map *map;
+ void *data;
+ u32 host_min, dump_size, offset, len;
+
+ if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
+ wil_err(wil, "%s: fail to obtain crash dump size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dump_size > size) {
+ wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
+ __func__, dump_size, size);
+ return -EINVAL;
+ }
+
+ /* copy to crash dump area */
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ map = &fw_mapping[i];
+
+ data = (void * __force)wil->csr + HOSTADDR(map->host);
+ len = map->to - map->from;
+ offset = map->host - host_min;
+
+ wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
+ __func__, fw_mapping[i].name, len, offset);
+
+ wil_memcpy_fromio_32((void * __force)(dest + offset),
+ (const void __iomem * __force)data, len);
+ }
+
+ return 0;
+}
+
+void wil_fw_core_dump(struct wil6210_priv *wil)
+{
+ void *fw_dump_data;
+ u32 fw_dump_size;
+
+ if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
+ wil_err(wil, "%s: fail to get fw dump size\n", __func__);
+ return;
+ }
+
+ fw_dump_data = vzalloc(fw_dump_size);
+ if (!fw_dump_data)
+ return;
+
+ if (wil_fw_copy_crash_dump(wil, fw_dump_data, fw_dump_size)) {
+ vfree(fw_dump_data);
+ return;
+ }
+ /* fw_dump_data will be freed by the device coredump release function
+ * after 5 min
+ */
+ dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
+ wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
+ fw_dump_size);
+}
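A hypothetical illustration of the bounds math in wil_fw_get_crash_dump_bounds(), with made-up section addresses:

	/* fw_mapping[0]: host 0x880000, to - from = 0x8000  (32 KiB)
	 * fw_mapping[1]: host 0x8c0000, to - from = 0x4000  (16 KiB)
	 * host_min = 0x880000, host_max = 0x8c4000
	 * dump size = 0x8c4000 - 0x880000 = 0x44000 bytes
	 * each section lands at offset (map->host - host_min); the gap
	 * between sections stays zeroed because the buffer is vzalloc()ed.
	 */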
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2f35d4c..6112189 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1120,7 +1120,7 @@
cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
cmd.sniffer_cfg.phy_support =
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
- ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
} else {
/* Initialize offload (in non-sniffer mode).
* Linux IP stack always calculates IP checksum
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fe3dc12..ab42b1f 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -82,5 +82,6 @@
config BRCMDBG
bool "Broadcom driver debug functions"
depends on BRCMSMAC || BRCMFMAC
+ select WANT_DEV_COREDUMP
---help---
Selecting this enables additional code for debug purposes.
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index 89e6a4d..230cad7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -65,6 +65,8 @@
* @rxctl: receive a control response message from dongle.
* @gettxq: obtain a reference of bus transmit queue (optional).
* @wowl_config: specify if dongle is configured for wowl when going to suspend
+ * @get_ramsize: obtain size of device memory.
+ * @get_memdump: obtain device memory dump in provided buffer.
*
* This structure provides an abstract interface towards the
* bus specific driver. For control messages to common driver
@@ -79,6 +81,8 @@
int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
struct pktq * (*gettxq)(struct device *dev);
void (*wowl_config)(struct device *dev, bool enabled);
+ size_t (*get_ramsize)(struct device *dev);
+ int (*get_memdump)(struct device *dev, void *data, size_t len);
};
@@ -185,6 +189,23 @@
bus->ops->wowl_config(bus->dev, enabled);
}
+static inline size_t brcmf_bus_get_ramsize(struct brcmf_bus *bus)
+{
+ if (!bus->ops->get_ramsize)
+ return 0;
+
+ return bus->ops->get_ramsize(bus->dev);
+}
+
+static inline
+int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
+{
+ if (!bus->ops->get_memdump)
+ return -EOPNOTSUPP;
+
+ return bus->ops->get_memdump(bus->dev, data, len);
+}
+
/*
* interface functions from common layer
*/
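A sketch of how a caller pairs the two new bus ops; this mirrors brcmf_debug_create_memdump() added later in this series (the function name here is hypothetical):

	static int example_dump_ram(struct brcmf_bus *bus)
	{
		size_t ramsize = brcmf_bus_get_ramsize(bus);
		void *buf;

		if (!ramsize)			/* bus cannot report RAM size */
			return -EOPNOTSUPP;
		buf = vzalloc(ramsize);
		if (!buf)
			return -ENOMEM;
		return brcmf_bus_get_memdump(bus, buf, ramsize);
	}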
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 891f4ed..deb5f78 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -840,7 +840,6 @@
err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
}
if (!err) {
- set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
brcmf_dbg(INFO, "IF Type = AP\n");
}
} else {
@@ -2432,6 +2431,9 @@
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
+ s32 total_rssi;
+ s32 count_rssi;
+ u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
if (!check_vif_up(ifp->vif))
@@ -2478,13 +2480,13 @@
sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
if (sinfo->tx_packets) {
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
- sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate);
- sinfo->txrate.legacy /= 100;
+ sinfo->txrate.legacy =
+ le32_to_cpu(sta_info_le.tx_rate) / 100;
}
if (sinfo->rx_packets) {
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
- sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate);
- sinfo->rxrate.legacy /= 100;
+ sinfo->rxrate.legacy =
+ le32_to_cpu(sta_info_le.rx_rate) / 100;
}
if (le16_to_cpu(sta_info_le.ver) >= 4) {
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
@@ -2492,12 +2494,61 @@
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
}
+ total_rssi = 0;
+ count_rssi = 0;
+ for (i = 0; i < BRCMF_ANT_MAX; i++) {
+ if (sta_info_le.rssi[i]) {
+ sinfo->chain_signal_avg[count_rssi] =
+ sta_info_le.rssi[i];
+ sinfo->chain_signal[count_rssi] =
+ sta_info_le.rssi[i];
+ total_rssi += sta_info_le.rssi[i];
+ count_rssi++;
+ }
+ }
+ if (count_rssi) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->chains = count_rssi;
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ total_rssi /= count_rssi;
+ sinfo->signal = total_rssi;
+ }
}
done:
brcmf_dbg(TRACE, "Exit\n");
return err;
}
+static int
+brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+ int idx, u8 *mac, struct station_info *sinfo)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter, idx %d\n", idx);
+
+ if (idx == 0) {
+ cfg->assoclist.count = cpu_to_le32(BRCMF_MAX_ASSOCLIST);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_ASSOCLIST,
+ &cfg->assoclist,
+ sizeof(cfg->assoclist));
+ if (err) {
+ brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
+ err);
+ cfg->assoclist.count = 0;
+ return -EOPNOTSUPP;
+ }
+ }
+ if (idx < le32_to_cpu(cfg->assoclist.count)) {
+ memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN);
+ return brcmf_cfg80211_get_station(wiphy, ndev, mac, sinfo);
+ }
+ return -ENOENT;
+}
+
static s32
brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
bool enabled, s32 timeout)
@@ -4199,8 +4250,8 @@
brcmf_dbg(TRACE, "GO mode configuration complete\n");
}
- clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+ brcmf_net_setcarrier(ifp, true);
exit:
if ((err) && (!mbss)) {
@@ -4264,8 +4315,8 @@
}
brcmf_set_mpc(ifp, 1);
brcmf_configure_arp_offload(ifp, true);
- set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+ brcmf_net_setcarrier(ifp, false);
return err;
}
@@ -4597,6 +4648,7 @@
.join_ibss = brcmf_cfg80211_join_ibss,
.leave_ibss = brcmf_cfg80211_leave_ibss,
.get_station = brcmf_cfg80211_get_station,
+ .dump_station = brcmf_cfg80211_dump_station,
.set_tx_power = brcmf_cfg80211_set_tx_power,
.get_tx_power = brcmf_cfg80211_get_tx_power,
.add_key = brcmf_cfg80211_add_key,
@@ -4974,6 +5026,7 @@
&ifp->vif->sme_state);
} else
brcmf_bss_connect_done(cfg, ndev, e, true);
+ brcmf_net_setcarrier(ifp, true);
} else if (brcmf_is_linkdown(e)) {
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif)) {
@@ -4983,6 +5036,7 @@
brcmf_init_prof(ndev_to_prof(ndev));
if (ndev != cfg_to_ndev(cfg))
complete(&cfg->vif_disabled);
+ brcmf_net_setcarrier(ifp, false);
} else if (brcmf_is_nonetwork(cfg, e)) {
if (brcmf_is_ibssmode(ifp->vif))
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -6238,6 +6292,17 @@
else
*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
+ /* p2p might require that "if-events" get processed by fweh. So
+ * activate the already registered event handlers now and activate
+ * the rest when initialization has completed. drvr->config needs to
+ * be assigned before activating events.
+ */
+ drvr->config = cfg;
+ err = brcmf_fweh_activate_events(ifp);
+ if (err) {
+ brcmf_err("FWEH activation failed (%d)\n", err);
+ goto wiphy_unreg_out;
+ }
err = brcmf_p2p_attach(cfg, p2pdev_forced);
if (err) {
@@ -6260,6 +6325,13 @@
brcmf_notify_tdls_peer_event);
}
+ /* (re-) activate FWEH event handling */
+ err = brcmf_fweh_activate_events(ifp);
+ if (err) {
+ brcmf_err("FWEH activation failed (%d)\n", err);
+ goto wiphy_unreg_out;
+ }
+
return cfg;
wiphy_unreg_out:
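The brcmf_cfg80211_dump_station() handler added above follows the cfg80211 iteration contract: the core invokes it with idx = 0, 1, 2, ... and stops at the first error, so fetching the firmware assoclist only at idx == 0 caches one snapshot per dump and -ENOENT cleanly ends the walk. Roughly (hypothetical caller, for illustration):

	for (idx = 0; ; idx++) {
		if (brcmf_cfg80211_dump_station(wiphy, ndev, idx, mac, &sinfo))
			break;		/* -ENOENT: no more stations */
		/* ... report one station entry ... */
	}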
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index 3f5e550..6a878c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -143,7 +143,6 @@
* @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
* @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
* @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
- * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
* @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
*/
enum brcmf_vif_status {
@@ -151,7 +150,6 @@
BRCMF_VIF_STATUS_CONNECTING,
BRCMF_VIF_STATUS_CONNECTED,
BRCMF_VIF_STATUS_DISCONNECTING,
- BRCMF_VIF_STATUS_AP_CREATING,
BRCMF_VIF_STATUS_AP_CREATED
};
@@ -407,6 +405,7 @@
struct brcmu_d11inf d11inf;
bool wowl_enabled;
u32 pre_wowl_pmmode;
+ struct brcmf_assoclist_le assoclist;
};
/**
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ffc3ace..f04833d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -682,6 +682,7 @@
case BRCM_CC_43570_CHIP_ID:
case BRCM_CC_4358_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
+ case BRCM_CC_4371_CHIP_ID:
return 0x180000;
case BRCM_CC_4365_CHIP_ID:
case BRCM_CC_4366_CHIP_ID:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h
index 0d39d80..21c7488 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h
@@ -17,4 +17,7 @@
extern const u8 ALLFFMAC[ETH_ALEN];
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
#endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index 8c2a280..b5ab98e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -33,6 +33,7 @@
#include "feature.h"
#include "proto.h"
#include "pcie.h"
+#include "common.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -634,8 +635,7 @@
brcmf_cfg80211_down(ndev);
- /* Set state and stop OS transmissions */
- netif_stop_queue(ndev);
+ brcmf_net_setcarrier(ifp, false);
return 0;
}
@@ -669,8 +669,8 @@
return -EIO;
}
- /* Allow transmit calls */
- netif_start_queue(ndev);
+ /* Clear carrier; it is set when connected or in AP mode. */
+ netif_carrier_off(ndev);
return 0;
}
@@ -735,6 +735,24 @@
brcmf_cfg80211_free_netdev(ndev);
}
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
+{
+ struct net_device *ndev;
+
+ brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);
+
+ ndev = ifp->ndev;
+ brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
+ if (on) {
+ if (!netif_carrier_ok(ndev))
+ netif_carrier_on(ndev);
+
+ } else {
+ if (netif_carrier_ok(ndev))
+ netif_carrier_off(ndev);
+ }
+}
+
static int brcmf_net_p2p_open(struct net_device *ndev)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -828,8 +846,8 @@
} else {
brcmf_dbg(INFO, "allocate netdev interface\n");
/* Allocate netdev, including space for private structure */
- ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
- ether_setup);
+ ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
+ NET_NAME_UNKNOWN, ether_setup);
if (!ndev)
return ERR_PTR(-ENOMEM);
@@ -957,8 +975,8 @@
drvr->bus_if = dev_get_drvdata(dev);
drvr->bus_if->drvr = drvr;
- /* create device debugfs folder */
- brcmf_debugfs_attach(drvr);
+ /* attach debug facilities */
+ brcmf_debug_attach(drvr);
/* Attach and link in the protocol */
ret = brcmf_proto_attach(drvr);
@@ -1021,12 +1039,7 @@
if (IS_ERR(ifp))
return PTR_ERR(ifp);
- if (brcmf_p2p_enable)
- p2p_ifp = brcmf_add_if(drvr, 1, 0, false, "p2p%d", NULL);
- else
- p2p_ifp = NULL;
- if (IS_ERR(p2p_ifp))
- p2p_ifp = NULL;
+ p2p_ifp = NULL;
/* signal bus ready */
brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
@@ -1060,11 +1073,13 @@
goto fail;
}
- ret = brcmf_fweh_activate_events(ifp);
- if (ret < 0)
- goto fail;
-
ret = brcmf_net_attach(ifp, false);
+
+ if ((!ret) && (brcmf_p2p_enable)) {
+ p2p_ifp = drvr->iflist[1];
+ if (p2p_ifp)
+ ret = brcmf_net_p2p_attach(p2p_ifp);
+ }
fail:
if (ret < 0) {
brcmf_err("failed: %d\n", ret);
@@ -1076,20 +1091,12 @@
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
- if (drvr->iflist[0]) {
+ if (ifp)
brcmf_net_detach(ifp->ndev);
- drvr->iflist[0] = NULL;
- }
- if (p2p_ifp) {
+ if (p2p_ifp)
brcmf_net_detach(p2p_ifp->ndev);
- drvr->iflist[1] = NULL;
- }
return ret;
}
- if ((brcmf_p2p_enable) && (p2p_ifp))
- if (brcmf_net_p2p_attach(p2p_ifp) < 0)
- brcmf_p2p_enable = 0;
-
return 0;
}
@@ -1155,7 +1162,7 @@
brcmf_proto_detach(drvr);
- brcmf_debugfs_detach(drvr);
+ brcmf_debug_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index d81ff95..2f9101b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -154,10 +154,13 @@
* netif stopped due to firmware signalling flow control.
* @BRCMF_NETIF_STOP_REASON_FLOW:
* netif stopped due to flowring full.
+ * @BRCMF_NETIF_STOP_REASON_DISCONNECTED:
+ * netif stopped due to not being connected (STA mode).
*/
enum brcmf_netif_stop_reason {
- BRCMF_NETIF_STOP_REASON_FWS_FC = 1,
- BRCMF_NETIF_STOP_REASON_FLOW = 2
+ BRCMF_NETIF_STOP_REASON_FWS_FC = BIT(0),
+ BRCMF_NETIF_STOP_REASON_FLOW = BIT(1),
+ BRCMF_NETIF_STOP_REASON_DISCONNECTED = BIT(2)
};
/**
@@ -213,8 +216,6 @@
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
#endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
index 2d6d005..1299dcc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -16,15 +16,45 @@
#include <linux/debugfs.h>
#include <linux/netdevice.h>
#include <linux/module.h>
+#include <linux/devcoredump.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "core.h"
#include "bus.h"
+#include "fweh.h"
#include "debug.h"
static struct dentry *root_folder;
+static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+ size_t len)
+{
+ void *dump;
+ size_t ramsize;
+
+ ramsize = brcmf_bus_get_ramsize(bus);
+ if (ramsize) {
+ dump = vzalloc(len + ramsize);
+ if (!dump)
+ return -ENOMEM;
+ memcpy(dump, data, len);
+ brcmf_bus_get_memdump(bus, dump + len, ramsize);
+ dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+ }
+ return 0;
+}
+
+static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *evtmsg,
+ void *data)
+{
+ brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
+
+ return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+ evtmsg->datalen);
+}
+
void brcmf_debugfs_init(void)
{
root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
@@ -41,7 +71,7 @@
root_folder = NULL;
}
-int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+int brcmf_debug_attach(struct brcmf_pub *drvr)
{
struct device *dev = drvr->bus_if->dev;
@@ -49,12 +79,18 @@
return -ENODEV;
drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+ if (IS_ERR(drvr->dbgfs_dir))
+ return PTR_ERR(drvr->dbgfs_dir);
- return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
+
+ return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+ brcmf_debug_psm_watchdog_notify);
}
-void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+void brcmf_debug_detach(struct brcmf_pub *drvr)
{
+ brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG);
+
if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
debugfs_remove_recursive(drvr->dbgfs_dir);
}
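The coredump built by brcmf_debug_create_memdump() above is laid out as the event payload followed by the RAM image:

	/*   +------------------+-----------------------------+
	 *   | event data (len) | device RAM image (ramsize)  |
	 *   +------------------+-----------------------------+
	 *   0                 len                      len + ramsize
	 *
	 * dev_coredumpv() takes ownership of the vzalloc()ed buffer and
	 * frees it itself, hence no vfree() on the success path.
	 */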
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
index 48648ca..d0d9676 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
@@ -109,8 +109,8 @@
#ifdef DEBUG
void brcmf_debugfs_init(void);
void brcmf_debugfs_exit(void);
-int brcmf_debugfs_attach(struct brcmf_pub *drvr);
-void brcmf_debugfs_detach(struct brcmf_pub *drvr);
+int brcmf_debug_attach(struct brcmf_pub *drvr);
+void brcmf_debug_detach(struct brcmf_pub *drvr);
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data));
@@ -121,11 +121,11 @@
static inline void brcmf_debugfs_exit(void)
{
}
-static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+static inline int brcmf_debug_attach(struct brcmf_pub *drvr)
{
return 0;
}
-static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+static inline void brcmf_debug_detach(struct brcmf_pub *drvr)
{
}
static inline
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 971920f..4248f3c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -29,7 +29,7 @@
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
-module_param_string(firmware_path, brcmf_firmware_path,
+module_param_string(alternative_fw_path, brcmf_firmware_path,
BRCMF_FW_PATH_LEN, 0440);
enum nvram_parser_state {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 383d6fa..3878b6f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -213,7 +213,8 @@
is_p2pdev, emsg->ifname, emsg->addr);
if (IS_ERR(ifp))
return;
- brcmf_fws_add_interface(ifp);
+ if (!is_p2pdev)
+ brcmf_fws_add_interface(ifp);
if (!drvr->fweh.evt_handler[BRCMF_E_IF])
if (brcmf_net_attach(ifp, false) < 0)
return;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 5434dcf..b20fc0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -72,6 +72,7 @@
#define BRCMF_C_GET_BSS_INFO 136
#define BRCMF_C_GET_BANDLIST 140
#define BRCMF_C_SET_SCB_TIMEOUT 158
+#define BRCMF_C_GET_ASSOCLIST 159
#define BRCMF_C_GET_PHYLIST 180
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 297911f..daa427b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -119,6 +119,8 @@
#define BRCMF_COUNTRY_BUF_SZ 4
#define BRCMF_ANT_MAX 4
+#define BRCMF_MAX_ASSOCLIST 128
+
/* join preference types for join_pref iovar */
enum brcmf_join_pref_types {
BRCMF_JOIN_PREF_RSSI = 1,
@@ -621,4 +623,15 @@
__le32 nvramrev;
};
+/**
+ * struct brcmf_assoclist_le - request assoc list.
+ *
+ * @count: indicates number of stations.
+ * @mac: MAC addresses of stations.
+ */
+struct brcmf_assoclist_le {
+ __le32 count;
+ u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN];
+};
+
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 7eff9de..44e618f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -873,9 +873,6 @@
commonring = msgbuf->flowrings[flowid];
atomic_dec(&commonring->outstanding_tx);
- /* Hante: i believe this was a bug as tx_status->msg.ifidx was used
- * in brcmf_txfinalize as index in drvr->iflist. Can you confirm/deny?
- */
brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
skb, true);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 37a8c352..d224b3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -2353,83 +2353,30 @@
* brcmf_p2p_attach() - attach for P2P.
*
* @cfg: driver private data for cfg80211 interface.
+ * @p2pdev_forced: create p2p device interface at attach.
*/
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
{
- struct brcmf_if *pri_ifp;
- struct brcmf_if *p2p_ifp;
- struct brcmf_cfg80211_vif *p2p_vif;
struct brcmf_p2p_info *p2p;
- struct brcmf_pub *drvr;
- s32 bssidx;
+ struct brcmf_if *pri_ifp;
s32 err = 0;
+ void *err_ptr;
p2p = &cfg->p2p;
p2p->cfg = cfg;
- drvr = cfg->pub;
-
- pri_ifp = brcmf_get_ifp(drvr, 0);
+ pri_ifp = brcmf_get_ifp(cfg->pub, 0);
p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
if (p2pdev_forced) {
- p2p_ifp = drvr->iflist[1];
+ err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+ if (IS_ERR(err_ptr)) {
+ brcmf_err("P2P device creation failed.\n");
+ err = PTR_ERR(err_ptr);
+ }
} else {
- p2p_ifp = NULL;
p2p->p2pdev_dynamically = true;
}
- if (p2p_ifp) {
- p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
- false);
- if (IS_ERR(p2p_vif)) {
- brcmf_err("could not create discovery vif\n");
- err = -ENOMEM;
- goto exit;
- }
-
- p2p_vif->ifp = p2p_ifp;
- p2p_ifp->vif = p2p_vif;
- p2p_vif->wdev.netdev = p2p_ifp->ndev;
- p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
- SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
-
- p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
-
- brcmf_p2p_generate_bss_mac(p2p, NULL);
- memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
- brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
-
- brcmf_fweh_p2pdev_setup(pri_ifp, true);
-
- /* Initialize P2P Discovery in the firmware */
- err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
- if (err < 0) {
- brcmf_err("set p2p_disc error\n");
- brcmf_free_vif(p2p_vif);
- goto exit;
- }
- /* obtain bsscfg index for P2P discovery */
- err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
- if (err < 0) {
- brcmf_err("retrieving discover bsscfg index failed\n");
- brcmf_free_vif(p2p_vif);
- goto exit;
- }
- /* Verify that firmware uses same bssidx as driver !! */
- if (p2p_ifp->bssidx != bssidx) {
- brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
- bssidx, p2p_ifp->bssidx);
- brcmf_free_vif(p2p_vif);
- goto exit;
- }
-
- init_completion(&p2p->send_af_done);
- INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
- init_completion(&p2p->afx_hdl.act_frm_scan);
- init_completion(&p2p->wait_next_af);
-exit:
- brcmf_fweh_p2pdev_setup(pri_ifp, false);
- }
return err;
}
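The rewritten brcmf_p2p_attach() leans on the kernel's ERR_PTR convention: brcmf_p2p_create_p2pdev() returns either a valid pointer or a negative errno encoded in the pointer value, unpacked only on failure:

	err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
	if (IS_ERR(err_ptr))
		err = PTR_ERR(err_ptr);	/* e.g. -ENOMEM */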
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 30baf35..83d8042 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -59,6 +59,8 @@
#define BRCMF_PCIE_4365_NVRAM_NAME "brcm/brcmfmac4365b-pcie.txt"
#define BRCMF_PCIE_4366_FW_NAME "brcm/brcmfmac4366b-pcie.bin"
#define BRCMF_PCIE_4366_NVRAM_NAME "brcm/brcmfmac4366b-pcie.txt"
+#define BRCMF_PCIE_4371_FW_NAME "brcm/brcmfmac4371-pcie.bin"
+#define BRCMF_PCIE_4371_NVRAM_NAME "brcm/brcmfmac4371-pcie.txt"
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
@@ -212,6 +214,8 @@
MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME);
struct brcmf_pcie_console {
@@ -448,6 +452,47 @@
}
+static void
+brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ void *dstaddr, u32 len)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+ __le32 *dst32;
+ __le16 *dst16;
+ u8 *dst8;
+
+ if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
+ if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
+ dst8 = (u8 *)dstaddr;
+ while (len) {
+ *dst8 = ioread8(address);
+ address++;
+ dst8++;
+ len--;
+ }
+ } else {
+ len = len / 2;
+ dst16 = (__le16 *)dstaddr;
+ while (len) {
+ *dst16 = cpu_to_le16(ioread16(address));
+ address += 2;
+ dst16++;
+ len--;
+ }
+ }
+ } else {
+ len = len / 4;
+ dst32 = (__le32 *)dstaddr;
+ while (len) {
+ *dst32 = cpu_to_le32(ioread32(address));
+ address += 4;
+ dst32++;
+ len--;
+ }
+ }
+}
+
+
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
CHIPCREGOFFS(reg), value)
@@ -1352,12 +1397,36 @@
}
+static size_t brcmf_pcie_get_ramsize(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+ return devinfo->ci->ramsize - devinfo->ci->srsize;
+}
+
+
+static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+ brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
+ brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
+ return 0;
+}
+
+
static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
.txctl = brcmf_pcie_tx_ctlpkt,
.rxctl = brcmf_pcie_rx_ctlpkt,
.wowl_config = brcmf_pcie_wowl_config,
+ .get_ramsize = brcmf_pcie_get_ramsize,
+ .get_memdump = brcmf_pcie_get_memdump,
};
@@ -1456,6 +1525,10 @@
fw_name = BRCMF_PCIE_4366_FW_NAME;
nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
break;
+ case BRCM_CC_4371_CHIP_ID:
+ fw_name = BRCMF_PCIE_4371_FW_NAME;
+ nvram_name = BRCMF_PCIE_4371_NVRAM_NAME;
+ break;
default:
brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
return -ENODEV;
@@ -1995,6 +2068,7 @@
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 7f574f2..7e74ac3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -3539,6 +3539,51 @@
return err;
}
+static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+
+ return bus->ci->ramsize - bus->ci->srsize;
+}
+
+static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
+ size_t mem_size)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ int err;
+ int address;
+ int offset;
+ int len;
+
+ brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
+ mem_size);
+
+ address = bus->ci->rambase;
+ offset = err = 0;
+ sdio_claim_host(sdiodev->func[1]);
+ while (offset < mem_size) {
+ len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
+ mem_size - offset;
+ err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
+ if (err) {
+ brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+ err, len, address);
+ goto done;
+ }
+ data += len;
+ offset += len;
+ address += len;
+ }
+
+done:
+ sdio_release_host(sdiodev->func[1]);
+ return err;
+}
+
void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
{
if (!bus->dpc_triggered) {
@@ -3987,7 +4032,9 @@
.txctl = brcmf_sdio_bus_txctl,
.rxctl = brcmf_sdio_bus_rxctl,
.gettxq = brcmf_sdio_bus_gettxq,
- .wowl_config = brcmf_sdio_wowl_config
+ .wowl_config = brcmf_sdio_wowl_config,
+ .get_ramsize = brcmf_sdio_bus_get_ramsize,
+ .get_memdump = brcmf_sdio_bus_get_memdump,
};
static void brcmf_sdio_firmware_callback(struct device *dev,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index daba86d..689e64d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -144,6 +144,7 @@
struct usb_device *usbdev;
struct device *dev;
+ struct mutex dev_init_lock;
int ctl_in_pipe, ctl_out_pipe;
struct urb *ctl_urb; /* URB for control endpoint */
@@ -1204,6 +1205,8 @@
int ret;
brcmf_dbg(USB, "Start fw downloading\n");
+
+ devinfo = bus->bus_priv.usb->devinfo;
ret = check_file(fw->data);
if (ret < 0) {
brcmf_err("invalid firmware\n");
@@ -1211,7 +1214,6 @@
goto error;
}
- devinfo = bus->bus_priv.usb->devinfo;
devinfo->image = fw->data;
devinfo->image_len = fw->size;
@@ -1224,9 +1226,11 @@
if (ret)
goto error;
+ mutex_unlock(&devinfo->dev_init_lock);
return;
error:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+ mutex_unlock(&devinfo->dev_init_lock);
device_release_driver(dev);
}
@@ -1264,6 +1268,7 @@
if (ret)
goto fail;
/* we are done */
+ mutex_unlock(&devinfo->dev_init_lock);
return 0;
}
bus->chip = bus_pub->devid;
@@ -1317,6 +1322,12 @@
devinfo->usbdev = usb;
devinfo->dev = &usb->dev;
+ /* Take an init lock to protect against disconnect while still loading.
+ * Necessary because of the asynchronous firmware load construction.
+ */
+ mutex_init(&devinfo->dev_init_lock);
+ mutex_lock(&devinfo->dev_init_lock);
+
usb_set_intfdata(intf, devinfo);
/* Check that the device supports only one configuration */
@@ -1391,6 +1402,7 @@
return 0;
fail:
+ mutex_unlock(&devinfo->dev_init_lock);
kfree(devinfo);
usb_set_intfdata(intf, NULL);
return ret;
@@ -1403,8 +1415,19 @@
brcmf_dbg(USB, "Enter\n");
devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
- brcmf_usb_disconnect_cb(devinfo);
- kfree(devinfo);
+
+ if (devinfo) {
+ mutex_lock(&devinfo->dev_init_lock);
+ /* Make sure that devinfo still exists. Firmware probe routines
+ * may have released the device and cleared the intfdata.
+ */
+ if (!usb_get_intfdata(intf))
+ goto done;
+
+ brcmf_usb_disconnect_cb(devinfo);
+ kfree(devinfo);
+ }
+done:
brcmf_dbg(USB, "Exit\n");
}
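The dev_init_lock introduced above serializes disconnect against the asynchronous firmware load; its lifecycle across the hunks is:

	/* probe():            mutex_init(); mutex_lock();
	 * fw load callback:   ... download ...; mutex_unlock();
	 * disconnect():       mutex_lock();  blocks until load completes
	 */

so a disconnect racing the firmware request waits for the load to finish (or fail) before tearing devinfo down.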
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index d823734..aa06ea2 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -50,6 +50,7 @@
#define BRCM_CC_43602_CHIP_ID 43602
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
+#define BRCM_CC_4371_CHIP_ID 0x4371
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -75,6 +76,7 @@
#define BRCM_PCIE_4366_DEVICE_ID 0x43c3
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
+#define BRCM_PCIE_4371_DEVICE_ID 0x440d
/* brcmsmac IDs */
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 7fcd2c24..13c97f6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -630,6 +630,7 @@
kfree(mvm->d3_resume_sram);
if (mvm->nd_config) {
kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config->scan_plans);
kfree(mvm->nd_config);
mvm->nd_config = NULL;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 4a1f9af..cee4f26 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1271,12 +1271,12 @@
params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
- if (req->interval > U16_MAX) {
+ if (req->scan_plans[0].interval > U16_MAX) {
IWL_DEBUG_SCAN(mvm,
"interval value is > 16-bits, set to max possible\n");
params.interval = U16_MAX;
} else {
- params.interval = req->interval / MSEC_PER_SEC;
+ params.interval = req->scan_plans[0].interval;
}
/* In theory, LMAC scans can handle a 32-bit delay, but since
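The units change behind this hunk: the legacy req->interval was in milliseconds (hence the old MSEC_PER_SEC division), while cfg80211's scan_plans[].interval is expressed in seconds, so the value is now used directly. The clamp is equivalent to:

	params.interval = min_t(u32, req->scan_plans[0].interval, U16_MAX);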
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 317d991..279167d 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -33,12 +33,12 @@
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
+ tristate "Marvell WiFi-Ex Driver for USB8766/8797/8997"
depends on MWIFIEX && USB
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8797/8897/8997 chipset with USB interface.
+ 8797/8997 chipset with USB interface.
If you choose to build it as a module, it will be called
mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 30cbafb..b7ac45f 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2374,7 +2374,7 @@
* CFG802.11 operation handler for scan request.
*
* This function issues a scan request to the firmware based upon
- * the user specified scan configuration. On successfull completion,
+ * the user specified scan configuration. On successful completion,
* it also informs the results.
*/
static int
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 5583856..9824d8d 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -856,6 +856,56 @@
return ret;
}
+static ssize_t
+mwifiex_timeshare_coex_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = file->private_data;
+ char buf[3];
+ bool timeshare_coex;
+ int ret;
+ unsigned int len;
+
+ if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+ return -EOPNOTSUPP;
+
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+ HostCmd_ACT_GEN_GET, 0, &timeshare_coex, true);
+ if (ret)
+ return ret;
+
+ len = sprintf(buf, "%d\n", timeshare_coex);
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static ssize_t
+mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ bool timeshare_coex;
+ struct mwifiex_private *priv = file->private_data;
+ char kbuf[16];
+ int ret;
+
+ if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+ return -EOPNOTSUPP;
+
+ memset(kbuf, 0, sizeof(kbuf));
+
+ if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
+ return -EFAULT;
+
+ if (strtobool(kbuf, &timeshare_coex))
+ return -EINVAL;
+
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+ HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
+ if (ret)
+ return ret;
+ else
+ return count;
+}
+
#define MWIFIEX_DFS_ADD_FILE(name) do { \
if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \
priv, &mwifiex_dfs_##name##_fops)) \
@@ -892,6 +942,7 @@
MWIFIEX_DFS_FILE_OPS(hscfg);
MWIFIEX_DFS_FILE_OPS(histogram);
MWIFIEX_DFS_FILE_OPS(debug_mask);
+MWIFIEX_DFS_FILE_OPS(timeshare_coex);
/*
* This function creates the debug FS directory structure and the files.
@@ -918,6 +969,7 @@
MWIFIEX_DFS_ADD_FILE(hscfg);
MWIFIEX_DFS_ADD_FILE(histogram);
MWIFIEX_DFS_ADD_FILE(debug_mask);
+ MWIFIEX_DFS_ADD_FILE(timeshare_coex);
}
/*
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 0e6f029..1e1e81a 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -101,6 +101,9 @@
#define FIRMWARE_READY_SDIO 0xfedc
#define FIRMWARE_READY_PCIE 0xfedcba00
+#define MWIFIEX_COEX_MODE_TIMESHARE 0x01
+#define MWIFIEX_COEX_MODE_SPATIAL 0x82
+
enum mwifiex_usb_ep {
MWIFIEX_USB_EP_CMD_EVENT = 1,
MWIFIEX_USB_EP_DATA = 2,
@@ -163,6 +166,7 @@
#define TLV_TYPE_CHANRPT_11H_BASIC (PROPRIETARY_TLV_BASE_ID + 91)
#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_ROBUST_COEX (PROPRIETARY_TLV_BASE_ID + 96)
#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
@@ -354,6 +358,7 @@
#define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df
#define HostCmd_CMD_TXPWR_CFG 0x00d1
#define HostCmd_CMD_TX_RATE_CFG 0x00d6
+#define HostCmd_CMD_ROBUST_COEX 0x00e0
#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4
#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
#define HostCmd_CMD_P2P_MODE_CFG 0x00eb
@@ -1877,6 +1882,11 @@
u8 reserved;
} __packed;
+struct mwifiex_ie_types_robust_coex {
+ struct mwifiex_ie_types_header header;
+ __le32 mode;
+} __packed;
+
struct host_cmd_ds_version_ext {
u8 version_str_sel;
char version_str[128];
@@ -2078,6 +2088,11 @@
__le16 policy;
} __packed;
+struct host_cmd_ds_robust_coex {
+ __le16 action;
+ __le16 reserved;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2147,6 +2162,7 @@
struct host_cmd_ds_chan_rpt_req chan_rpt_req;
struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
struct host_cmd_ds_multi_chan_policy mc_policy;
+ struct host_cmd_ds_robust_coex coex;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 6e3faa7..969ca1e 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -1199,6 +1199,7 @@
.ndo_stop = mwifiex_close,
.ndo_start_xmit = mwifiex_hard_start_xmit,
.ndo_set_mac_address = mwifiex_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = mwifiex_tx_timeout,
.ndo_get_stats = mwifiex_get_stats,
.ndo_set_rx_mode = mwifiex_set_multicast_list,
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 504b321..e486867 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1531,6 +1531,33 @@
return 0;
}
+static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, bool *is_timeshare)
+{
+ struct host_cmd_ds_robust_coex *coex = &cmd->params.coex;
+ struct mwifiex_ie_types_robust_coex *coex_tlv;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_ROBUST_COEX);
+ cmd->size = cpu_to_le16(sizeof(*coex) + sizeof(*coex_tlv) + S_DS_GEN);
+
+ coex->action = cpu_to_le16(cmd_action);
+ coex_tlv = (struct mwifiex_ie_types_robust_coex *)
+ ((u8 *)coex + sizeof(*coex));
+ coex_tlv->header.type = cpu_to_le16(TLV_TYPE_ROBUST_COEX);
+ coex_tlv->header.len = cpu_to_le16(sizeof(coex_tlv->mode));
+
+ if (coex->action == HostCmd_ACT_GEN_GET)
+ return 0;
+
+ if (*is_timeshare)
+ coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_TIMESHARE);
+ else
+ coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_SPATIAL);
+
+ return 0;
+}
+
static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
@@ -2040,6 +2067,10 @@
ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_ROBUST_COEX:
+ ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index d0961635..9ac7aa2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -1007,6 +1007,28 @@
return 0;
}
+static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp,
+ bool *is_timeshare)
+{
+ struct host_cmd_ds_robust_coex *coex = &resp->params.coex;
+ struct mwifiex_ie_types_robust_coex *coex_tlv;
+ u16 action = le16_to_cpu(coex->action);
+ u32 mode;
+
+ coex_tlv = (struct mwifiex_ie_types_robust_coex
+ *)((u8 *)coex + sizeof(struct host_cmd_ds_robust_coex));
+ if (action == HostCmd_ACT_GEN_GET) {
+ mode = le32_to_cpu(coex_tlv->mode);
+ if (mode == MWIFIEX_COEX_MODE_TIMESHARE)
+ *is_timeshare = true;
+ else
+ *is_timeshare = false;
+ }
+
+ return 0;
+}
+
/*
* This function handles the command responses.
*
@@ -1213,6 +1235,9 @@
break;
case HostCmd_CMD_TDLS_CONFIG:
break;
+ case HostCmd_CMD_ROBUST_COEX:
+ ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 4d5a6e3..759a6ad 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -846,22 +846,6 @@
{
enum state_11d_t state_11d;
- if (mwifiex_del_mgmt_ies(priv))
- mwifiex_dbg(priv->adapter, ERROR,
- "Failed to delete mgmt IEs!\n");
-
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- mwifiex_dbg(priv->adapter, ERROR, "Failed to stop the BSS\n");
- return -1;
- }
-
- if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET,
- HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- mwifiex_dbg(priv->adapter, ERROR, "Failed to reset BSS\n");
- return -1;
- }
-
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, false)) {
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 9f5356e..e43aff9 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -42,11 +42,6 @@
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0xff)},
- /* 8897 */
- {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
- {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
- USB_CLASS_VENDOR_SPEC,
- USB_SUBCLASS_VENDOR_SPEC, 0xff)},
/* 8997 */
{USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
@@ -403,14 +398,12 @@
case USB8766_PID_1:
case USB8797_PID_1:
case USB8801_PID_1:
- case USB8897_PID_1:
case USB8997_PID_1:
card->usb_boot_state = USB8XXX_FW_DNLD;
break;
case USB8766_PID_2:
case USB8797_PID_2:
case USB8801_PID_2:
- case USB8897_PID_2:
case USB8997_PID_2:
card->usb_boot_state = USB8XXX_FW_READY;
break;
@@ -964,12 +957,6 @@
strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
adapter->ext_scan = true;
break;
- case USB8897_PID_1:
- case USB8897_PID_2:
- adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
- strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
- adapter->ext_scan = true;
- break;
case USB8766_PID_1:
case USB8766_PID_2:
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
@@ -1277,5 +1264,4 @@
MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index bab10ee..b4e9246 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -28,11 +28,9 @@
#define USB8766_PID_2 0x2042
#define USB8797_PID_1 0x2043
#define USB8797_PID_2 0x2044
-#define USB8897_PID_1 0x2045
-#define USB8897_PID_2 0x2046
#define USB8801_PID_1 0x2049
#define USB8801_PID_2 0x204a
-#define USB8997_PID_1 0x204d
+#define USB8997_PID_1 0x2052
#define USB8997_PID_2 0x204e
@@ -48,7 +46,6 @@
#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
-#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin"
#define FW_DNLD_TX_BUF_SIZE 620
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 57c13ec..acccd67 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -684,7 +684,7 @@
if (!memcmp(ra_list->ra, mac, ETH_ALEN))
continue;
- if (ra_list && ra_list->tx_paused != tx_pause) {
+ if (ra_list->tx_paused != tx_pause) {
pkt_cnt += ra_list->total_pkt_count;
ra_list->tx_paused = tx_pause;
if (tx_pause)
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
new file mode 100644
index 0000000..9c78deb
--- /dev/null
+++ b/drivers/net/wireless/realtek/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux wireless network device drivers for Realtek devices
+#
+
+obj-$(CONFIG_RTL8180) += rtl818x/
+obj-$(CONFIG_RTL8187) += rtl818x/
+obj-$(CONFIG_RTLWIFI) += rtlwifi/
+obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/
+
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/realtek/rtl818x/Kconfig
similarity index 100%
rename from drivers/net/wireless/rtl818x/Kconfig
rename to drivers/net/wireless/realtek/rtl818x/Kconfig
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/realtek/rtl818x/Makefile
similarity index 100%
rename from drivers/net/wireless/rtl818x/Makefile
rename to drivers/net/wireless/realtek/rtl818x/Makefile
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
similarity index 69%
rename from drivers/net/wireless/rtl818x/rtl8180/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 21005bd..2966681 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_RTL8180) += rtl818x_pci.o
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/dev.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/grf5101.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/grf5101.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/max2820.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/max2820.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/sa2400.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/sa2400.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
similarity index 62%
rename from drivers/net/wireless/rtl818x/rtl8187/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index 7b62992..ff07491 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_RTL8187) += rtl8187.o
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/dev.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/leds.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/leds.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rfkill.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rfkill.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/realtek/rtl818x/rtl818x.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl818x.h
rename to drivers/net/wireless/realtek/rtl818x/rtl818x.h
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
new file mode 100644
index 0000000..dd4d626
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -0,0 +1,34 @@
+#
+# RTL8XXXU Wireless LAN device configuration
+#
+config RTL8XXXU
+ tristate "RTL8723AU/RTL8188[CR]U/RTL819[12]CU (mac80211) support"
+ depends on MAC80211 && USB
+ ---help---
+ This is an alternative driver for various Realtek RTL8XXX
+ parts written to utilize the Linux mac80211 stack.
+ The driver is known to work with a number of RTL8723AU,
+ RTL8188CU, RTL8188RU, RTL8191CU, and RTL8192CU devices.
+
+ This driver is under development and has a limited feature
+ set. In particular it does not yet support 40 MHz channels
+ and power management. However, it should have a smaller
+ memory footprint than the vendor drivers and benefits
+ from the in-kernel mac80211 stack.
+
+ It can coexist with drivers from drivers/staging/rtl8723au,
+ drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi,
+ but you will need to control which module you wish to load
+ (see the modprobe sketch after this entry).
+
+ To compile this driver as a module, choose M here: the module
+ will be called rtl8xxxu. If unsure, say N.
+
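Because this driver can claim the same USB IDs as the staging rtl8723au driver, module selection is normally handled from userspace. A minimal modprobe sketch, with illustrative module names (r8723au is assumed to be the staging module's name in your tree):

    # /etc/modprobe.d/rtl8xxxu.conf -- illustrative; module names may differ
    blacklist r8723au        # prefer the mac80211 rtl8xxxu driver
    # blacklist rtl8xxxu     # or prefer the staging driver instead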
+config RTL8XXXU_UNTESTED
+ bool "Include support for untested Realtek 8xxx USB devices (EXPERIMENTAL)"
+ depends on RTL8XXXU
+ ---help---
+ This option enables detection of Realtek 8723/8188/8191/8192 WiFi
+ USB devices which have not been tested directly by the driver
+ author or reported to be working by third parties.
+
+ Please report your results!
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
new file mode 100644
index 0000000..5dea3bb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
new file mode 100644
index 0000000..6aed923
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -0,0 +1,5993 @@
+/*
+ * RTL8XXXU mac80211 USB driver
+ *
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+#define DRIVER_NAME "rtl8xxxu"
+
+static int rtl8xxxu_debug;
+static bool rtl8xxxu_ht40_2g;
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
+
+module_param_named(debug, rtl8xxxu_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Set debug mask");
+module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600);
+MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band");
+
+#define USB_VENDOR_ID_REALTEK 0x0bda
+/* RX buffer size: at minimum IEEE80211_MAX_FRAME_LEN */
+#define RTL_RX_BUFFER_SIZE IEEE80211_MAX_FRAME_LEN
+#define RTL8XXXU_RX_URBS 32
+#define RTL8XXXU_RX_URB_PENDING_WATER 8
+#define RTL8XXXU_TX_URBS 64
+#define RTL8XXXU_TX_URB_LOW_WATER 25
+#define RTL8XXXU_TX_URB_HIGH_WATER 32
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rx_urb *rx_urb);
+
+static struct ieee80211_rate rtl8xxxu_rates[] = {
+ { .bitrate = 10, .hw_value = DESC_RATE_1M, .flags = 0 },
+ { .bitrate = 20, .hw_value = DESC_RATE_2M, .flags = 0 },
+ { .bitrate = 55, .hw_value = DESC_RATE_5_5M, .flags = 0 },
+ { .bitrate = 110, .hw_value = DESC_RATE_11M, .flags = 0 },
+ { .bitrate = 60, .hw_value = DESC_RATE_6M, .flags = 0 },
+ { .bitrate = 90, .hw_value = DESC_RATE_9M, .flags = 0 },
+ { .bitrate = 120, .hw_value = DESC_RATE_12M, .flags = 0 },
+ { .bitrate = 180, .hw_value = DESC_RATE_18M, .flags = 0 },
+ { .bitrate = 240, .hw_value = DESC_RATE_24M, .flags = 0 },
+ { .bitrate = 360, .hw_value = DESC_RATE_36M, .flags = 0 },
+ { .bitrate = 480, .hw_value = DESC_RATE_48M, .flags = 0 },
+ { .bitrate = 540, .hw_value = DESC_RATE_54M, .flags = 0 },
+};
+
+static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ .hw_value = 1, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ .hw_value = 2, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ .hw_value = 3, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ .hw_value = 4, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ .hw_value = 5, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ .hw_value = 6, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ .hw_value = 7, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ .hw_value = 8, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ .hw_value = 9, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ .hw_value = 10, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ .hw_value = 11, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ .hw_value = 12, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ .hw_value = 13, .max_power = 30 },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ .hw_value = 14, .max_power = 30 }
+};
+
+static struct ieee80211_supported_band rtl8xxxu_supported_band = {
+ .channels = rtl8xxxu_channels_2g,
+ .n_channels = ARRAY_SIZE(rtl8xxxu_channels_2g),
+ .bitrates = rtl8xxxu_rates,
+ .n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
+};
+
+static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+ {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+ {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+ {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+ {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+ {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+ {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390004},
+ {0x828, 0x00000000}, {0x82c, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83c, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84c, 0x00000000},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a569a}, {0x85c, 0x001b25a4},
+ {0x860, 0x66f60110}, {0x864, 0x061f0130},
+ {0x868, 0x00000000}, {0x86c, 0x32323200},
+ {0x870, 0x07000760}, {0x874, 0x22004000},
+ {0x878, 0x00000808}, {0x87c, 0x00000000},
+ {0x880, 0xc0083070}, {0x884, 0x000004d5},
+ {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121111},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+ {0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xa78, 0x00000900},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+ {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x018610db},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+ {0xc80, 0x40000100}, {0xc84, 0x20f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+ {0xc90, 0x00121820}, {0xc94, 0x00000000},
+ {0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x00000080},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00080740}, {0xd04, 0x00020401},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+ {0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00027293},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000000}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+ {0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+ {0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+ {0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+ {0xe68, 0x001b25a4}, {0xe6c, 0x631b25a0},
+ {0xe70, 0x631b25a0}, {0xe74, 0x081b25a0},
+ {0xe78, 0x081b25a0}, {0xe7c, 0x081b25a0},
+ {0xe80, 0x081b25a0}, {0xe84, 0x631b25a0},
+ {0xe88, 0x081b25a0}, {0xe8c, 0x631b25a0},
+ {0xed0, 0x631b25a0}, {0xed4, 0x631b25a0},
+ {0xed8, 0x631b25a0}, {0xedc, 0x001b25a0},
+ {0xee0, 0x001b25a0}, {0xeec, 0x6b1b25a0},
+ {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+ {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
+ {0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+ {0x800, 0x80040002}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10000330}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390004},
+ {0x828, 0x01000100}, {0x82c, 0x00390004},
+ {0x830, 0x27272727}, {0x834, 0x27272727},
+ {0x838, 0x27272727}, {0x83c, 0x27272727},
+ {0x840, 0x00010000}, {0x844, 0x00010000},
+ {0x848, 0x27272727}, {0x84c, 0x27272727},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a569a}, {0x85c, 0x0c1b25a4},
+ {0x860, 0x66e60230}, {0x864, 0x061f0130},
+ {0x868, 0x27272727}, {0x86c, 0x2b2b2b27},
+ {0x870, 0x07000700}, {0x874, 0x22184000},
+ {0x878, 0x08080808}, {0x87c, 0x00000000},
+ {0x880, 0xc0083070}, {0x884, 0x000004d5},
+ {0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121313},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+ {0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05633},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac44}, {0xc34, 0x469652cf},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+ {0xc60, 0x00000000}, {0xc64, 0x5116848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x2186115b},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b99612},
+ {0xc80, 0x40000100}, {0xc84, 0x20f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0xa0e40000},
+ {0xc90, 0x00121820}, {0xc94, 0x00000000},
+ {0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x00000080},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00080740}, {0xd04, 0x00020403},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+ {0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00027293},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000000}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+ {0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+ {0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+ {0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000010},
+ {0xe68, 0x001b25a4}, {0xe6c, 0x63db25a4},
+ {0xe70, 0x63db25a4}, {0xe74, 0x0c1b25a4},
+ {0xe78, 0x0c1b25a4}, {0xe7c, 0x0c1b25a4},
+ {0xe80, 0x0c1b25a4}, {0xe84, 0x63db25a4},
+ {0xe88, 0x0c1b25a4}, {0xe8c, 0x63db25a4},
+ {0xed0, 0x63db25a4}, {0xed4, 0x63db25a4},
+ {0xed8, 0x63db25a4}, {0xedc, 0x001b25a4},
+ {0xee0, 0x001b25a4}, {0xeec, 0x6fdb25a4},
+ {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+ {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
+ {0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+ {0x040, 0x000c0004}, {0x800, 0x80040000},
+ {0x804, 0x00000001}, {0x808, 0x0000fc00},
+ {0x80c, 0x0000000a}, {0x810, 0x10005388},
+ {0x814, 0x020c3d10}, {0x818, 0x02200385},
+ {0x81c, 0x00000000}, {0x820, 0x01000100},
+ {0x824, 0x00390204}, {0x828, 0x00000000},
+ {0x82c, 0x00000000}, {0x830, 0x00000000},
+ {0x834, 0x00000000}, {0x838, 0x00000000},
+ {0x83c, 0x00000000}, {0x840, 0x00010000},
+ {0x844, 0x00000000}, {0x848, 0x00000000},
+ {0x84c, 0x00000000}, {0x850, 0x00000000},
+ {0x854, 0x00000000}, {0x858, 0x569a569a},
+ {0x85c, 0x001b25a4}, {0x860, 0x66e60230},
+ {0x864, 0x061f0130}, {0x868, 0x00000000},
+ {0x86c, 0x20202000}, {0x870, 0x03000300},
+ {0x874, 0x22004000}, {0x878, 0x00000808},
+ {0x87c, 0x00ffc3f1}, {0x880, 0xc0083070},
+ {0x884, 0x000004d5}, {0x888, 0x00000000},
+ {0x88c, 0xccc000c0}, {0x890, 0x00000800},
+ {0x894, 0xfffffffe}, {0x898, 0x40302010},
+ {0x89c, 0x00706050}, {0x900, 0x00000000},
+ {0x904, 0x00000023}, {0x908, 0x00000000},
+ {0x90c, 0x81121111}, {0xa00, 0x00d047c8},
+ {0xa04, 0x80ff000c}, {0xa08, 0x8c838300},
+ {0xa0c, 0x2e68120f}, {0xa10, 0x9500bb78},
+ {0xa14, 0x11144028}, {0xa18, 0x00881117},
+ {0xa1c, 0x89140f00}, {0xa20, 0x15160000},
+ {0xa24, 0x070b0f12}, {0xa28, 0x00000104},
+ {0xa2c, 0x00d30000}, {0xa70, 0x101fbf00},
+ {0xa74, 0x00000007}, {0xc00, 0x48071d40},
+ {0xc04, 0x03a05611}, {0xc08, 0x000000e4},
+ {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+ {0xc14, 0x40000100}, {0xc18, 0x08800000},
+ {0xc1c, 0x40000100}, {0xc20, 0x00000000},
+ {0xc24, 0x00000000}, {0xc28, 0x00000000},
+ {0xc2c, 0x00000000}, {0xc30, 0x69e9ac44},
+ {0xc34, 0x469652cf}, {0xc38, 0x49795994},
+ {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+ {0xc44, 0x000100b7}, {0xc48, 0xec020107},
+ {0xc4c, 0x007f037f}, {0xc50, 0x6954342e},
+ {0xc54, 0x43bc0094}, {0xc58, 0x6954342f},
+ {0xc5c, 0x433c0094}, {0xc60, 0x00000000},
+ {0xc64, 0x5116848b}, {0xc68, 0x47c00bff},
+ {0xc6c, 0x00000036}, {0xc70, 0x2c46000d},
+ {0xc74, 0x018610db}, {0xc78, 0x0000001f},
+ {0xc7c, 0x00b91612}, {0xc80, 0x24000090},
+ {0xc84, 0x20f60000}, {0xc88, 0x24000090},
+ {0xc8c, 0x20200000}, {0xc90, 0x00121820},
+ {0xc94, 0x00000000}, {0xc98, 0x00121820},
+ {0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+ {0xca4, 0x00000080}, {0xca8, 0x00000000},
+ {0xcac, 0x00000000}, {0xcb0, 0x00000000},
+ {0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+ {0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+ {0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+ {0xccc, 0x00000000}, {0xcd0, 0x00000000},
+ {0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+ {0xcdc, 0x00766932}, {0xce0, 0x00222222},
+ {0xce4, 0x00000000}, {0xce8, 0x37644302},
+ {0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+ {0xd04, 0x00020401}, {0xd08, 0x0000907f},
+ {0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+ {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+ {0xd2c, 0xcc979975}, {0xd30, 0x00000000},
+ {0xd34, 0x80608000}, {0xd38, 0x00000000},
+ {0xd3c, 0x00027293}, {0xd40, 0x00000000},
+ {0xd44, 0x00000000}, {0xd48, 0x00000000},
+ {0xd4c, 0x00000000}, {0xd50, 0x6437140a},
+ {0xd54, 0x00000000}, {0xd58, 0x00000000},
+ {0xd5c, 0x30032064}, {0xd60, 0x4653de68},
+ {0xd64, 0x04518a3c}, {0xd68, 0x00002101},
+ {0xd6c, 0x2a201c16}, {0xd70, 0x1812362e},
+ {0xd74, 0x322c2220}, {0xd78, 0x000e3c24},
+ {0xe00, 0x24242424}, {0xe04, 0x24242424},
+ {0xe08, 0x03902024}, {0xe10, 0x24242424},
+ {0xe14, 0x24242424}, {0xe18, 0x24242424},
+ {0xe1c, 0x24242424}, {0xe28, 0x00000000},
+ {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+ {0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+ {0xe40, 0x01007c00}, {0xe44, 0x01004800},
+ {0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+ {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+ {0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+ {0xe60, 0x00000008}, {0xe68, 0x001b25a4},
+ {0xe6c, 0x631b25a0}, {0xe70, 0x631b25a0},
+ {0xe74, 0x081b25a0}, {0xe78, 0x081b25a0},
+ {0xe7c, 0x081b25a0}, {0xe80, 0x081b25a0},
+ {0xe84, 0x631b25a0}, {0xe88, 0x081b25a0},
+ {0xe8c, 0x631b25a0}, {0xed0, 0x631b25a0},
+ {0xed4, 0x631b25a0}, {0xed8, 0x631b25a0},
+ {0xedc, 0x001b25a0}, {0xee0, 0x001b25a0},
+ {0xeec, 0x6b1b25a0}, {0xee8, 0x31555448},
+ {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+ {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
+ {0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+ {0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+ {0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+ {0xc78, 0x7a060001}, {0xc78, 0x79070001},
+ {0xc78, 0x78080001}, {0xc78, 0x77090001},
+ {0xc78, 0x760a0001}, {0xc78, 0x750b0001},
+ {0xc78, 0x740c0001}, {0xc78, 0x730d0001},
+ {0xc78, 0x720e0001}, {0xc78, 0x710f0001},
+ {0xc78, 0x70100001}, {0xc78, 0x6f110001},
+ {0xc78, 0x6e120001}, {0xc78, 0x6d130001},
+ {0xc78, 0x6c140001}, {0xc78, 0x6b150001},
+ {0xc78, 0x6a160001}, {0xc78, 0x69170001},
+ {0xc78, 0x68180001}, {0xc78, 0x67190001},
+ {0xc78, 0x661a0001}, {0xc78, 0x651b0001},
+ {0xc78, 0x641c0001}, {0xc78, 0x631d0001},
+ {0xc78, 0x621e0001}, {0xc78, 0x611f0001},
+ {0xc78, 0x60200001}, {0xc78, 0x49210001},
+ {0xc78, 0x48220001}, {0xc78, 0x47230001},
+ {0xc78, 0x46240001}, {0xc78, 0x45250001},
+ {0xc78, 0x44260001}, {0xc78, 0x43270001},
+ {0xc78, 0x42280001}, {0xc78, 0x41290001},
+ {0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+ {0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+ {0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+ {0xc78, 0x21300001}, {0xc78, 0x20310001},
+ {0xc78, 0x06320001}, {0xc78, 0x05330001},
+ {0xc78, 0x04340001}, {0xc78, 0x03350001},
+ {0xc78, 0x02360001}, {0xc78, 0x01370001},
+ {0xc78, 0x00380001}, {0xc78, 0x00390001},
+ {0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+ {0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+ {0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+ {0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+ {0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+ {0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+ {0xc78, 0x7a460001}, {0xc78, 0x79470001},
+ {0xc78, 0x78480001}, {0xc78, 0x77490001},
+ {0xc78, 0x764a0001}, {0xc78, 0x754b0001},
+ {0xc78, 0x744c0001}, {0xc78, 0x734d0001},
+ {0xc78, 0x724e0001}, {0xc78, 0x714f0001},
+ {0xc78, 0x70500001}, {0xc78, 0x6f510001},
+ {0xc78, 0x6e520001}, {0xc78, 0x6d530001},
+ {0xc78, 0x6c540001}, {0xc78, 0x6b550001},
+ {0xc78, 0x6a560001}, {0xc78, 0x69570001},
+ {0xc78, 0x68580001}, {0xc78, 0x67590001},
+ {0xc78, 0x665a0001}, {0xc78, 0x655b0001},
+ {0xc78, 0x645c0001}, {0xc78, 0x635d0001},
+ {0xc78, 0x625e0001}, {0xc78, 0x615f0001},
+ {0xc78, 0x60600001}, {0xc78, 0x49610001},
+ {0xc78, 0x48620001}, {0xc78, 0x47630001},
+ {0xc78, 0x46640001}, {0xc78, 0x45650001},
+ {0xc78, 0x44660001}, {0xc78, 0x43670001},
+ {0xc78, 0x42680001}, {0xc78, 0x41690001},
+ {0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+ {0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+ {0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+ {0xc78, 0x21700001}, {0xc78, 0x20710001},
+ {0xc78, 0x06720001}, {0xc78, 0x05730001},
+ {0xc78, 0x04740001}, {0xc78, 0x03750001},
+ {0xc78, 0x02760001}, {0xc78, 0x01770001},
+ {0xc78, 0x00780001}, {0xc78, 0x00790001},
+ {0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+ {0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+ {0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+ {0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+ {0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+ {0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+ {0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+ {0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+ {0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+ {0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+ {0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+ {0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+ {0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+ {0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+ {0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+ {0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+ {0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+ {0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+ {0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
+ {0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+ {0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+ {0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+ {0xc78, 0x7b060001}, {0xc78, 0x7b070001},
+ {0xc78, 0x7b080001}, {0xc78, 0x7a090001},
+ {0xc78, 0x790a0001}, {0xc78, 0x780b0001},
+ {0xc78, 0x770c0001}, {0xc78, 0x760d0001},
+ {0xc78, 0x750e0001}, {0xc78, 0x740f0001},
+ {0xc78, 0x73100001}, {0xc78, 0x72110001},
+ {0xc78, 0x71120001}, {0xc78, 0x70130001},
+ {0xc78, 0x6f140001}, {0xc78, 0x6e150001},
+ {0xc78, 0x6d160001}, {0xc78, 0x6c170001},
+ {0xc78, 0x6b180001}, {0xc78, 0x6a190001},
+ {0xc78, 0x691a0001}, {0xc78, 0x681b0001},
+ {0xc78, 0x671c0001}, {0xc78, 0x661d0001},
+ {0xc78, 0x651e0001}, {0xc78, 0x641f0001},
+ {0xc78, 0x63200001}, {0xc78, 0x62210001},
+ {0xc78, 0x61220001}, {0xc78, 0x60230001},
+ {0xc78, 0x46240001}, {0xc78, 0x45250001},
+ {0xc78, 0x44260001}, {0xc78, 0x43270001},
+ {0xc78, 0x42280001}, {0xc78, 0x41290001},
+ {0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+ {0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+ {0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+ {0xc78, 0x21300001}, {0xc78, 0x20310001},
+ {0xc78, 0x06320001}, {0xc78, 0x05330001},
+ {0xc78, 0x04340001}, {0xc78, 0x03350001},
+ {0xc78, 0x02360001}, {0xc78, 0x01370001},
+ {0xc78, 0x00380001}, {0xc78, 0x00390001},
+ {0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+ {0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+ {0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+ {0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+ {0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+ {0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+ {0xc78, 0x7b460001}, {0xc78, 0x7b470001},
+ {0xc78, 0x7b480001}, {0xc78, 0x7a490001},
+ {0xc78, 0x794a0001}, {0xc78, 0x784b0001},
+ {0xc78, 0x774c0001}, {0xc78, 0x764d0001},
+ {0xc78, 0x754e0001}, {0xc78, 0x744f0001},
+ {0xc78, 0x73500001}, {0xc78, 0x72510001},
+ {0xc78, 0x71520001}, {0xc78, 0x70530001},
+ {0xc78, 0x6f540001}, {0xc78, 0x6e550001},
+ {0xc78, 0x6d560001}, {0xc78, 0x6c570001},
+ {0xc78, 0x6b580001}, {0xc78, 0x6a590001},
+ {0xc78, 0x695a0001}, {0xc78, 0x685b0001},
+ {0xc78, 0x675c0001}, {0xc78, 0x665d0001},
+ {0xc78, 0x655e0001}, {0xc78, 0x645f0001},
+ {0xc78, 0x63600001}, {0xc78, 0x62610001},
+ {0xc78, 0x61620001}, {0xc78, 0x60630001},
+ {0xc78, 0x46640001}, {0xc78, 0x45650001},
+ {0xc78, 0x44660001}, {0xc78, 0x43670001},
+ {0xc78, 0x42680001}, {0xc78, 0x41690001},
+ {0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+ {0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+ {0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+ {0xc78, 0x21700001}, {0xc78, 0x20710001},
+ {0xc78, 0x06720001}, {0xc78, 0x05730001},
+ {0xc78, 0x04740001}, {0xc78, 0x03750001},
+ {0xc78, 0x02760001}, {0xc78, 0x01770001},
+ {0xc78, 0x00780001}, {0xc78, 0x00790001},
+ {0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+ {0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+ {0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+ {0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+ {0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+ {0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+ {0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+ {0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+ {0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+ {0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+ {0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+ {0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+ {0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+ {0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+ {0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+ {0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+ {0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+ {0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+ {0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00039c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
+ {0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00030355},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0000024f},
+ {0x1f, 0x00000000}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x00057730},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f474},
+ {0x15, 0x0004f477}, {0x15, 0x0008f455},
+ {0x15, 0x000cf455}, {0x16, 0x00000339},
+ {0x16, 0x00040339}, {0x16, 0x00080339},
+ {0x16, 0x000c0366}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00000003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00000247}, {0x1f, 0x00000000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00010255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f424},
+ {0x15, 0x0004f424}, {0x15, 0x0008f424},
+ {0x15, 0x000cf424}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287af}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x00014297},
+ {0x13, 0x00010295}, {0x13, 0x0000c298},
+ {0x13, 0x0000819c}, {0x13, 0x000040a8},
+ {0x13, 0x0000001c}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f424},
+ {0x15, 0x0004f424}, {0x15, 0x0008f424},
+ {0x15, 0x000cf424}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00010255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f405},
+ {0x15, 0x0004f405}, {0x15, 0x0008f405},
+ {0x15, 0x000cf405}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb0}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e529},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00000255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x0000083c},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000977c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x000d8000}, {0x12, 0x00090000},
+ {0x12, 0x00051000}, {0x12, 0x00012000},
+ {0x13, 0x00028fb4}, {0x13, 0x00024fa8},
+ {0x13, 0x000207a4}, {0x13, 0x0001c3b0},
+ {0x13, 0x000183a4}, {0x13, 0x00014398},
+ {0x13, 0x000101a4}, {0x13, 0x0000c198},
+ {0x13, 0x000080a4}, {0x13, 0x00004098},
+ {0x13, 0x00000000}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f405},
+ {0x15, 0x0004f405}, {0x15, 0x0008f405},
+ {0x15, 0x000cf405}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
+ { /* RF_A */
+ .hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
+ .hssiparm2 = REG_FPGA0_XA_HSSI_PARM2,
+ .lssiparm = REG_FPGA0_XA_LSSI_PARM,
+ .hspiread = REG_HSPI_XA_READBACK,
+ .lssiread = REG_FPGA0_XA_LSSI_READBACK,
+ .rf_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL,
+ },
+ { /* RF_B */
+ .hssiparm1 = REG_FPGA0_XB_HSSI_PARM1,
+ .hssiparm2 = REG_FPGA0_XB_HSSI_PARM2,
+ .lssiparm = REG_FPGA0_XB_LSSI_PARM,
+ .hspiread = REG_HSPI_XB_READBACK,
+ .lssiread = REG_FPGA0_XB_LSSI_READBACK,
+ .rf_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL,
+ },
+};
+
+static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_XA_RX_IQ_IMBALANCE,
+ REG_OFDM0_XB_RX_IQ_IMBALANCE,
+ REG_OFDM0_ENERGY_CCA_THRES,
+ REG_OFDM0_AGCR_SSI_TABLE,
+ REG_OFDM0_XA_TX_IQ_IMBALANCE,
+ REG_OFDM0_XB_TX_IQ_IMBALANCE,
+ REG_OFDM0_XC_TX_AFE,
+ REG_OFDM0_XD_TX_AFE,
+ REG_OFDM0_RX_IQ_EXT_ANTA
+};
+
+static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
+{
+ struct usb_device *udev = priv->udev;
+ int len;
+ u8 data;
+
+ mutex_lock(&priv->usb_buf_mutex);
+ len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+ addr, 0, &priv->usb_buf.val8, sizeof(u8),
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+ data = priv->usb_buf.val8;
+ mutex_unlock(&priv->usb_buf_mutex);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+ dev_info(&udev->dev, "%s(%04x) = 0x%02x, len %i\n",
+ __func__, addr, data, len);
+ return data;
+}
+
+static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
+{
+ struct usb_device *udev = priv->udev;
+ int len;
+ u16 data;
+
+ mutex_lock(&priv->usb_buf_mutex);
+ len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+ addr, 0, &priv->usb_buf.val16, sizeof(u16),
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+ data = le16_to_cpu(priv->usb_buf.val16);
+ mutex_unlock(&priv->usb_buf_mutex);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+ dev_info(&udev->dev, "%s(%04x) = 0x%04x, len %i\n",
+ __func__, addr, data, len);
+ return data;
+}
+
+static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
+{
+ struct usb_device *udev = priv->udev;
+ int len;
+ u32 data;
+
+ mutex_lock(&priv->usb_buf_mutex);
+ len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+ addr, 0, &priv->usb_buf.val32, sizeof(u32),
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+ data = le32_to_cpu(priv->usb_buf.val32);
+ mutex_unlock(&priv->usb_buf_mutex);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+ dev_info(&udev->dev, "%s(%04x) = 0x%08x, len %i\n",
+ __func__, addr, data, len);
+ return data;
+}
+
+static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
+{
+ struct usb_device *udev = priv->udev;
+ int ret;
+
+ mutex_lock(&priv->usb_buf_mutex);
+ priv->usb_buf.val8 = val;
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+ addr, 0, &priv->usb_buf.val8, sizeof(u8),
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+
+ mutex_unlock(&priv->usb_buf_mutex);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+ dev_info(&udev->dev, "%s(%04x) = 0x%02x\n",
+ __func__, addr, val);
+ return ret;
+}
+
+static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
+{
+ struct usb_device *udev = priv->udev;
+ int ret;
+
+ mutex_lock(&priv->usb_buf_mutex);
+ priv->usb_buf.val16 = cpu_to_le16(val);
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+ addr, 0, &priv->usb_buf.val16, sizeof(u16),
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+ mutex_unlock(&priv->usb_buf_mutex);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+ dev_info(&udev->dev, "%s(%04x) = 0x%04x\n",
+ __func__, addr, val);
+ return ret;
+}
+
+static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
+{
+ struct usb_device *udev = priv->udev;
+ int ret;
+
+ mutex_lock(&priv->usb_buf_mutex);
+ priv->usb_buf.val32 = cpu_to_le32(val);
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+ addr, 0, &priv->usb_buf.val32, sizeof(u32),
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+ mutex_unlock(&priv->usb_buf_mutex);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+ dev_info(&udev->dev, "%s(%04x) = 0x%08x\n",
+ __func__, addr, val);
+ return ret;
+}
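These helpers serialize all register I/O through a single bounce buffer under usb_buf_mutex and fold the little-endian conversion in, so callers can write plain read-modify-write sequences. A minimal sketch, not part of the patch, using a placeholder register address and bit:

    /* Illustrative only: 0x0100 and BIT(3) are placeholders, not real
     * rtl8xxxu register definitions.
     */
    static void rtl8xxxu_example_rmw(struct rtl8xxxu_priv *priv)
    {
            u32 val32;

            val32 = rtl8xxxu_read32(priv, 0x0100);
            val32 |= BIT(3);
            rtl8xxxu_write32(priv, 0x0100, val32);
    }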
+
+static int
+rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
+{
+ struct usb_device *udev = priv->udev;
+ int blocksize = priv->fops->writeN_block_size;
+ int ret, i, count, remainder;
+
+ count = len / blocksize;
+ remainder = len % blocksize;
+
+ for (i = 0; i < count; i++) {
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+ addr, 0, buf, blocksize,
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+ if (ret != blocksize)
+ goto write_error;
+
+ addr += blocksize;
+ buf += blocksize;
+ }
+
+ if (remainder) {
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+ addr, 0, buf, remainder,
+ RTW_USB_CONTROL_MSG_TIMEOUT);
+		if (ret != remainder) {
+			blocksize = remainder;	/* for the error report below */
+			goto write_error;
+		}
+ }
+
+ return len;
+
+write_error:
+ dev_info(&udev->dev,
+ "%s: Failed to write block at addr: %04x size: %04x\n",
+ __func__, addr, blocksize);
+ return -EAGAIN;
+}
+
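+/*
+ * RF registers are accessed via the HSSI interface: the register
+ * address is latched by toggling the EDGE_READ bit in path A's
+ * HSSI_PARM2, and the 20-bit result is then fetched from either the
+ * HSPI or the LSSI readback register, depending on PI mode.
+ */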
+static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg)
+{
+ u32 hssia, val32, retval;
+
+ hssia = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+ if (path != RF_A)
+ val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm2);
+ else
+ val32 = hssia;
+
+ val32 &= ~FPGA0_HSSI_PARM2_ADDR_MASK;
+ val32 |= (reg << FPGA0_HSSI_PARM2_ADDR_SHIFT);
+ val32 |= FPGA0_HSSI_PARM2_EDGE_READ;
+ hssia &= ~FPGA0_HSSI_PARM2_EDGE_READ;
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+
+ udelay(10);
+
+ rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].hssiparm2, val32);
+ udelay(100);
+
+ hssia |= FPGA0_HSSI_PARM2_EDGE_READ;
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+ udelay(10);
+
+ val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm1);
+ if (val32 & FPGA0_HSSI_PARM1_PI)
+ retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hspiread);
+ else
+ retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].lssiread);
+
+ retval &= 0xfffff;
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_READ)
+ dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+ __func__, reg, retval);
+ return retval;
+}
+
+static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg, u32 data)
+{
+ int ret, retval;
+ u32 dataaddr;
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
+ dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+ __func__, reg, data);
+
+ data &= FPGA0_LSSI_PARM_DATA_MASK;
+ dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
+
+ /* Use XB for path B */
+ ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
+ if (ret != sizeof(dataaddr))
+ retval = -EIO;
+ else
+ retval = 0;
+
+ udelay(1);
+
+ return retval;
+}
+
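+/*
+ * Host-to-card commands are posted to the firmware mailboxes in
+ * round-robin order; a mailbox may only be reused once the firmware
+ * has cleared its busy bit in REG_HMTFR.
+ */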
+static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
+{
+ struct device *dev = &priv->udev->dev;
+ int mbox_nr, retry, retval = 0;
+ int mbox_reg, mbox_ext_reg;
+ u8 val8;
+
+ mutex_lock(&priv->h2c_mutex);
+
+ mbox_nr = priv->next_mbox;
+ mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
+ mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
+
+ /*
+ * MBOX ready?
+ */
+ retry = 100;
+ do {
+ val8 = rtl8xxxu_read8(priv, REG_HMTFR);
+ if (!(val8 & BIT(mbox_nr)))
+ break;
+	} while (--retry);
+
+ if (!retry) {
+ dev_dbg(dev, "%s: Mailbox busy\n", __func__);
+ retval = -EBUSY;
+ goto error;
+ }
+
+ /*
+ * Need to swap as it's being swapped again by rtl8xxxu_write16/32()
+ */
+ if (h2c->cmd.cmd & H2C_EXT) {
+ rtl8xxxu_write16(priv, mbox_ext_reg,
+ le16_to_cpu(h2c->raw.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %04x\n",
+ le16_to_cpu(h2c->raw.ext));
+ }
+ rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data));
+
+ priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX;
+
+error:
+ mutex_unlock(&priv->h2c_mutex);
+ return retval;
+}
+
+static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+
+ val8 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+ val8 |= BIT(0) | BIT(3);
+ rtl8xxxu_write8(priv, REG_SPS0_CTRL, val8);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+ val32 &= ~(BIT(4) | BIT(5));
+ val32 |= BIT(3);
+ if (priv->rf_paths == 2) {
+ val32 &= ~(BIT(20) | BIT(21));
+ val32 |= BIT(19);
+ }
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~OFDM_RF_PATH_TX_MASK;
+ if (priv->tx_paths == 2)
+ val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
+ else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+ val32 |= OFDM_RF_PATH_TX_B;
+ else
+ val32 |= OFDM_RF_PATH_TX_A;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE_JAPAN;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ if (priv->rf_paths == 2)
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x63db25a0);
+ else
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x631b25a0);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x32d95);
+ if (priv->rf_paths == 2)
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x32d95);
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+{
+ u8 sps0;
+ u32 val32;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+
+ /* RF RX code for preamble power saving */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+ val32 &= ~(BIT(3) | BIT(4) | BIT(5));
+ if (priv->rf_paths == 2)
+ val32 &= ~(BIT(19) | BIT(20) | BIT(21));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+ /* Disable TX for four paths */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~OFDM_RF_PATH_TX_MASK;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ /* Enable power saving */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= FPGA_RF_MODE_JAPAN;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ /* AFE control register to power down bits [30:22] */
+ if (priv->rf_paths == 2)
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00db25a0);
+ else
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x001b25a0);
+
+ /* Power down RF module */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+ if (priv->rf_paths == 2)
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0);
+
+ sps0 &= ~(BIT(0) | BIT(3));
+ rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0);
+}
+
+static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL + 2);
+ val8 &= ~BIT(6);
+ rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL + 2, val8);
+
+ rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 1, 0x64);
+ val8 = rtl8xxxu_read8(priv, REG_TBTT_PROHIBIT + 2);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8);
+}
+
+/*
+ * The rtl8723a has 3 channel groups for its efuse settings. It only
+ * supports the 2.4GHz band, so channels 1 - 14:
+ * group 0: channels 1 - 3
+ * group 1: channels 4 - 9
+ * group 2: channels 10 - 14
+ *
+ * Note: We index from 0 in the code
+ */
+static int rtl8723a_channel_to_group(int channel)
+{
+ int group;
+
+ if (channel < 4)
+ group = 0;
+ else if (channel < 10)
+ group = 1;
+ else
+ group = 2;
+
+ return group;
+}
+
+static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u32 val32, rsr;
+ u8 val8, opmode;
+ bool ht = true;
+ int sec_ch_above, channel;
+ int i;
+
+ opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE);
+ rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ channel = hw->conf.chandef.chan->hw_value;
+
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ ht = false;
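+		/* fall through */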
+ case NL80211_CHAN_WIDTH_20:
+ opmode |= BW_OPMODE_20MHZ;
+ rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+ val32 |= FPGA0_ANALOG2_20MHZ;
+ rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+ break;
+ case NL80211_CHAN_WIDTH_40:
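+		/*
+		 * For 40MHz operation the radio is tuned to the centre of
+		 * the channel pair, i.e. two 5MHz channel steps above or
+		 * below the primary channel.
+		 */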
+ if (hw->conf.chandef.center_freq1 >
+ hw->conf.chandef.chan->center_freq) {
+ sec_ch_above = 1;
+ channel += 2;
+ } else {
+ sec_ch_above = 0;
+ channel -= 2;
+ }
+
+ opmode &= ~BW_OPMODE_20MHZ;
+ rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+ rsr &= ~RSR_RSC_BANDWIDTH_40M;
+ if (sec_ch_above)
+ rsr |= RSR_RSC_UPPER_SUB_CHANNEL;
+ else
+ rsr |= RSR_RSC_LOWER_SUB_CHANNEL;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ /*
+ * Set Control channel to upper or lower. These settings
+ * are required only for 40MHz
+ */
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+ val32 &= ~CCK0_SIDEBAND;
+ if (!sec_ch_above)
+ val32 |= CCK0_SIDEBAND;
+ rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+ val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+ if (sec_ch_above)
+ val32 |= OFDM_LSTF_PRIME_CH_LOW;
+ else
+ val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+ val32 &= ~FPGA0_ANALOG2_20MHZ;
+ rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+ if (sec_ch_above)
+ val32 |= FPGA0_PS_UPPER_CHANNEL;
+ else
+ val32 |= FPGA0_PS_LOWER_CHANNEL;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ val32 &= ~MODE_AG_CHANNEL_MASK;
+ val32 |= channel;
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+
+ if (ht)
+ val8 = 0x0e;
+ else
+ val8 = 0x0a;
+
+ rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8);
+ rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8);
+
+ rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808);
+ rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a);
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 &= ~MODE_AG_CHANNEL_20MHZ;
+ else
+ val32 |= MODE_AG_CHANNEL_20MHZ;
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+}
+
+static void
+rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+ u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+ u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+ u8 val8;
+ int group, i;
+
+ group = rtl8723a_channel_to_group(channel);
+
+ cck[0] = priv->cck_tx_power_index_A[group];
+ cck[1] = priv->cck_tx_power_index_B[group];
+
+ ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+ ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+
+ ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
+ ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
+
+ mcsbase[0] = ofdm[0];
+ mcsbase[1] = ofdm[1];
+ if (!ht40) {
+ mcsbase[0] += priv->ht20_tx_power_index_diff[group].a;
+ mcsbase[1] += priv->ht20_tx_power_index_diff[group].b;
+ }
+
+ if (priv->tx_paths > 1) {
+ if (ofdm[0] > priv->ht40_2s_tx_power_index_diff[group].a)
+ ofdm[0] -= priv->ht40_2s_tx_power_index_diff[group].a;
+ if (ofdm[1] > priv->ht40_2s_tx_power_index_diff[group].b)
+ ofdm[1] -= priv->ht40_2s_tx_power_index_diff[group].b;
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+ dev_info(&priv->udev->dev,
+ "%s: Setting TX power CCK A: %02x, "
+ "CCK B: %02x, OFDM A: %02x, OFDM B: %02x\n",
+ __func__, cck[0], cck[1], ofdm[0], ofdm[1]);
+
+ for (i = 0; i < RTL8723A_MAX_RF_PATHS; i++) {
+ if (cck[i] > RF6052_MAX_TX_PWR)
+ cck[i] = RF6052_MAX_TX_PWR;
+ if (ofdm[i] > RF6052_MAX_TX_PWR)
+ ofdm[i] = RF6052_MAX_TX_PWR;
+ }
+
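+	/*
+	 * The TX AGC registers pack one power index per byte; CCK 1M for
+	 * path A sits in byte 1 of REG_TX_AGC_A_CCK1_MCS32.
+	 */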
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck[0] << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck[0] << 8) | (cck[0] << 16) | (cck[0] << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xffffff00;
+ val32 |= cck[1];
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+ val32 &= 0xff;
+ val32 |= ((cck[1] << 8) | (cck[1] << 16) | (cck[1] << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+ ofdm_a = ofdmbase[0] | ofdmbase[0] << 8 |
+ ofdmbase[0] << 16 | ofdmbase[0] << 24;
+ ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
+ ofdmbase[1] << 16 | ofdmbase[1] << 24;
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+
+ mcs_a = mcsbase[0] | mcsbase[0] << 8 |
+ mcsbase[0] << 16 | mcsbase[0] << 24;
+ mcs_b = mcsbase[1] | mcsbase[1] << 8 |
+ mcsbase[1] << 16 | mcsbase[1] << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+ else
+ val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+ rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+ }
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+ else
+ val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+ rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+ }
+}
+
+static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
+ enum nl80211_iftype linktype)
+{
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_MSR);
+ val8 &= ~MSR_LINKTYPE_MASK;
+
+ switch (linktype) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ val8 |= MSR_LINKTYPE_NONE;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ val8 |= MSR_LINKTYPE_ADHOC;
+ break;
+ case NL80211_IFTYPE_STATION:
+ val8 |= MSR_LINKTYPE_STATION;
+ break;
+ case NL80211_IFTYPE_AP:
+ val8 |= MSR_LINKTYPE_AP;
+ break;
+ default:
+ goto out;
+ }
+
+ rtl8xxxu_write8(priv, REG_MSR, val8);
+out:
+ return;
+}
+
+static void
+rtl8xxxu_set_retry(struct rtl8xxxu_priv *priv, u16 short_retry, u16 long_retry)
+{
+ u16 val16;
+
+ val16 = ((short_retry << RETRY_LIMIT_SHORT_SHIFT) &
+ RETRY_LIMIT_SHORT_MASK) |
+ ((long_retry << RETRY_LIMIT_LONG_SHIFT) &
+ RETRY_LIMIT_LONG_MASK);
+
+ rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+}
+
+static void
+rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm)
+{
+ u16 val16;
+
+ val16 = ((cck << SPEC_SIFS_CCK_SHIFT) & SPEC_SIFS_CCK_MASK) |
+ ((ofdm << SPEC_SIFS_OFDM_SHIFT) & SPEC_SIFS_OFDM_MASK);
+
+ rtl8xxxu_write16(priv, REG_SPEC_SIFS, val16);
+}
+
+static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ char *cut;
+
+ switch (priv->chip_cut) {
+ case 0:
+ cut = "A";
+ break;
+ case 1:
+ cut = "B";
+ break;
+ default:
+ cut = "unknown";
+ }
+
+ dev_info(dev,
+ "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+ priv->chip_name, cut, priv->vendor_umc ? "UMC" : "TSMC",
+ priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
+ priv->has_wifi, priv->has_bluetooth, priv->has_gps,
+ priv->hi_pa);
+
+ dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
+}
+
+static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 val32, bonding;
+ u16 val16;
+
+ val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
+ SYS_CFG_CHIP_VERSION_SHIFT;
+ if (val32 & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ return -ENOTSUPP;
+ }
+
+ if (val32 & SYS_CFG_BT_FUNC) {
+ sprintf(priv->chip_name, "8723AU");
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+ priv->rtlchip = 0x8723a;
+
+ val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
+ if (val32 & MULTI_WIFI_FUNC_EN)
+ priv->has_wifi = 1;
+ if (val32 & MULTI_BT_FUNC_EN)
+ priv->has_bluetooth = 1;
+ if (val32 & MULTI_GPS_FUNC_EN)
+ priv->has_gps = 1;
+ } else if (val32 & SYS_CFG_TYPE_ID) {
+ bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+ bonding &= HPON_FSM_BONDING_MASK;
+ if (bonding == HPON_FSM_BONDING_1T2R) {
+ sprintf(priv->chip_name, "8191CU");
+ priv->rf_paths = 2;
+ priv->rx_paths = 2;
+ priv->tx_paths = 1;
+ priv->rtlchip = 0x8191c;
+ } else {
+ sprintf(priv->chip_name, "8192CU");
+ priv->rf_paths = 2;
+ priv->rx_paths = 2;
+ priv->tx_paths = 2;
+ priv->rtlchip = 0x8192c;
+ }
+ priv->has_wifi = 1;
+ } else {
+ sprintf(priv->chip_name, "8188CU");
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+ priv->rtlchip = 0x8188c;
+ priv->has_wifi = 1;
+ }
+
+ if (val32 & SYS_CFG_VENDOR_ID)
+ priv->vendor_umc = 1;
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+ priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28;
+
+ val16 = rtl8xxxu_read16(priv, REG_NORMAL_SIE_EP_TX);
+ if (val16 & NORMAL_SIE_EP_TX_HIGH_MASK) {
+ priv->ep_tx_high_queue = 1;
+ priv->ep_tx_count++;
+ }
+
+ if (val16 & NORMAL_SIE_EP_TX_NORMAL_MASK) {
+ priv->ep_tx_normal_queue = 1;
+ priv->ep_tx_count++;
+ }
+
+ if (val16 & NORMAL_SIE_EP_TX_LOW_MASK) {
+ priv->ep_tx_low_queue = 1;
+ priv->ep_tx_count++;
+ }
+
+ /*
+ * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+ */
+ if (!priv->ep_tx_count) {
+ switch (priv->nr_out_eps) {
+ case 3:
+ priv->ep_tx_low_queue = 1;
+ priv->ep_tx_count++;
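+			/* fall through */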
+ case 2:
+ priv->ep_tx_normal_queue = 1;
+ priv->ep_tx_count++;
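+			/* fall through */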
+ case 1:
+ priv->ep_tx_high_queue = 1;
+ priv->ep_tx_count++;
+ break;
+ default:
+			dev_info(dev, "Unsupported USB TX endpoints\n");
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A,
+ priv->efuse_wifi.efuse8723.cck_tx_power_index_A,
+ sizeof(priv->cck_tx_power_index_A));
+ memcpy(priv->cck_tx_power_index_B,
+ priv->efuse_wifi.efuse8723.cck_tx_power_index_B,
+ sizeof(priv->cck_tx_power_index_B));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A,
+ sizeof(priv->ht40_1s_tx_power_index_A));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B,
+ sizeof(priv->ht40_1s_tx_power_index_B));
+
+ memcpy(priv->ht20_tx_power_index_diff,
+ priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff,
+ sizeof(priv->ht20_tx_power_index_diff));
+ memcpy(priv->ofdm_tx_power_index_diff,
+ priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff,
+ sizeof(priv->ofdm_tx_power_index_diff));
+
+ memcpy(priv->ht40_max_power_offset,
+ priv->efuse_wifi.efuse8723.ht40_max_power_offset,
+ sizeof(priv->ht40_max_power_offset));
+ memcpy(priv->ht20_max_power_offset,
+ priv->efuse_wifi.efuse8723.ht20_max_power_offset,
+ sizeof(priv->ht20_max_power_offset));
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+ priv->efuse_wifi.efuse8723.vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.41s\n",
+ priv->efuse_wifi.efuse8723.device_name);
+ return 0;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ int i;
+
+ if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A,
+ priv->efuse_wifi.efuse8192.cck_tx_power_index_A,
+ sizeof(priv->cck_tx_power_index_A));
+ memcpy(priv->cck_tx_power_index_B,
+ priv->efuse_wifi.efuse8192.cck_tx_power_index_B,
+ sizeof(priv->cck_tx_power_index_B));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A,
+ sizeof(priv->ht40_1s_tx_power_index_A));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B,
+ sizeof(priv->ht40_1s_tx_power_index_B));
+ memcpy(priv->ht40_2s_tx_power_index_diff,
+ priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff,
+ sizeof(priv->ht40_2s_tx_power_index_diff));
+
+ memcpy(priv->ht20_tx_power_index_diff,
+ priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff,
+ sizeof(priv->ht20_tx_power_index_diff));
+ memcpy(priv->ofdm_tx_power_index_diff,
+ priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff,
+ sizeof(priv->ofdm_tx_power_index_diff));
+
+ memcpy(priv->ht40_max_power_offset,
+ priv->efuse_wifi.efuse8192.ht40_max_power_offset,
+ sizeof(priv->ht40_max_power_offset));
+ memcpy(priv->ht20_max_power_offset,
+ priv->efuse_wifi.efuse8192.ht20_max_power_offset,
+ sizeof(priv->ht20_max_power_offset));
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+ priv->efuse_wifi.efuse8192.vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.20s\n",
+ priv->efuse_wifi.efuse8192.device_name);
+
+ if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) {
+ sprintf(priv->chip_name, "8188RU");
+ priv->hi_pa = 1;
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8192cu_efuse));
+ for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+ return 0;
+}
+
+#endif
+
+static int
+rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
+{
+ int i;
+ u8 val8;
+ u32 val32;
+
+ /* Write Address */
+ rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 1, offset & 0xff);
+ val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 2);
+ val8 &= 0xfc;
+ val8 |= (offset >> 8) & 0x03;
+ rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 2, val8);
+
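+	/*
+	 * Clearing bit 31 of REG_EFUSE_CTRL starts the read; the hardware
+	 * sets the bit again once the data is valid.
+	 */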
+ val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 3);
+ rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 3, val8 & 0x7f);
+
+ /* Poll for data read */
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+ for (i = 0; i < RTL8XXXU_MAX_REG_POLL; i++) {
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+ if (val32 & BIT(31))
+ break;
+ }
+
+ if (i == RTL8XXXU_MAX_REG_POLL)
+ return -EIO;
+
+ udelay(50);
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+
+ *data = val32 & 0xff;
+ return 0;
+}
+
+static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int i, ret = 0;
+ u8 val8, word_mask, header, extheader;
+ u16 val16, efuse_addr, offset;
+ u32 val32;
+
+ val16 = rtl8xxxu_read16(priv, REG_9346CR);
+ if (val16 & EEPROM_ENABLE)
+ priv->has_eeprom = 1;
+ if (val16 & EEPROM_BOOT)
+ priv->boot_eeprom = 1;
+
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
+ val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT;
+ rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
+
+ dev_dbg(dev, "Booting from %s\n",
+ priv->boot_eeprom ? "EEPROM" : "EFUSE");
+
+ rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_ENABLE);
+
+ /* 1.2V Power: From VDDON with Power Cut(0x0000[15]), default valid */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+ if (!(val16 & SYS_ISO_PWC_EV12V)) {
+ val16 |= SYS_ISO_PWC_EV12V;
+ rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+ }
+ /* Reset: 0x0000[28], default valid */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ if (!(val16 & SYS_FUNC_ELDR)) {
+ val16 |= SYS_FUNC_ELDR;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+ }
+
+ /*
+ * Clock: Gated(0x0008[5]) 8M(0x0008[1]) clock from ANA, default valid
+ */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR);
+ if (!(val16 & SYS_CLK_LOADER_ENABLE) || !(val16 & SYS_CLK_ANA8M)) {
+ val16 |= (SYS_CLK_LOADER_ENABLE | SYS_CLK_ANA8M);
+ rtl8xxxu_write16(priv, REG_SYS_CLKR, val16);
+ }
+
+ /* Default value is 0xff */
+ memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A);
+
+ efuse_addr = 0;
+ while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) {
+ ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header);
+ if (ret || header == 0xff)
+ goto exit;
+
+ if ((header & 0x1f) == 0x0f) { /* extended header */
+ offset = (header & 0xe0) >> 5;
+
+ ret = rtl8xxxu_read_efuse8(priv, efuse_addr++,
+ &extheader);
+ if (ret)
+ goto exit;
+ /* All words disabled */
+ if ((extheader & 0x0f) == 0x0f)
+ continue;
+
+ offset |= ((extheader & 0xf0) >> 1);
+ word_mask = extheader & 0x0f;
+ } else {
+ offset = (header >> 4) & 0x0f;
+ word_mask = header & 0x0f;
+ }
+
+ if (offset < EFUSE_MAX_SECTION_8723A) {
+ u16 map_addr;
+ /* Get word enable value from PG header */
+
+ /* We have 8 bits to indicate validity */
+ map_addr = offset * 8;
+ if (map_addr >= EFUSE_MAP_LEN_8723A) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
+ ret = -EINVAL;
+ goto exit;
+ }
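+			/*
+			 * A cleared bit in the word mask means the
+			 * corresponding two-byte word was programmed and
+			 * follows in the efuse.
+			 */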
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(word_mask & BIT(i))) {
+ ret = rtl8xxxu_read_efuse8(priv,
+ efuse_addr++,
+ &val8);
+ if (ret)
+ goto exit;
+ priv->efuse_wifi.raw[map_addr++] = val8;
+
+ ret = rtl8xxxu_read_efuse8(priv,
+ efuse_addr++,
+ &val8);
+ if (ret)
+ goto exit;
+ priv->efuse_wifi.raw[map_addr++] = val8;
+ } else
+ map_addr += 2;
+ }
+ } else {
+ dev_warn(dev,
+ "%s: Illegal offset (%04x), efuse corrupt!\n",
+ __func__, offset);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+ rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_DISABLE);
+
+ return ret;
+}
+
+static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int ret = 0, i;
+ u32 val32;
+
+ /* Poll checksum report */
+ for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+ val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+ if (val32 & MCU_FW_DL_CSUM_REPORT)
+ break;
+ }
+
+ if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+ dev_warn(dev, "Firmware checksum poll timed out\n");
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+ val32 |= MCU_FW_DL_READY;
+ val32 &= ~MCU_WINT_INIT_READY;
+ rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
+
+ /* Wait for firmware to become ready */
+ for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+ val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+ if (val32 & MCU_WINT_INIT_READY)
+ break;
+
+ udelay(100);
+ }
+
+ if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+ dev_warn(dev, "Firmware failed to start\n");
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
+{
+ int pages, remainder, i, ret;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ u8 *fwptr;
+
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1);
+ val8 |= 4;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, val8);
+
+ /* 8051 enable */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE);
+
+ /* MCU firmware download enable */
+ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE);
+
+ /* 8051 reset */
+ val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+ rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19));
+
+ /* Reset firmware download checksum */
+ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT);
+
+ pages = priv->fw_size / RTL_FW_PAGE_SIZE;
+ remainder = priv->fw_size % RTL_FW_PAGE_SIZE;
+
+ fwptr = priv->fw_data->data;
+
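+	/* The target page is selected via the low 3 bits of MCU_FW_DL byte 2 */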
+ for (i = 0; i < pages; i++) {
+ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+
+ ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+ fwptr, RTL_FW_PAGE_SIZE);
+ if (ret != RTL_FW_PAGE_SIZE) {
+ ret = -EAGAIN;
+ goto fw_abort;
+ }
+
+ fwptr += RTL_FW_PAGE_SIZE;
+ }
+
+ if (remainder) {
+ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+ ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+ fwptr, remainder);
+ if (ret != remainder) {
+ ret = -EAGAIN;
+ goto fw_abort;
+ }
+ }
+
+ ret = 0;
+fw_abort:
+ /* MCU firmware download disable */
+ val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL);
+ rtl8xxxu_write16(priv, REG_MCU_FW_DL,
+ val16 & (~MCU_FW_DL_ENABLE & 0xff));
+
+ return ret;
+}
+
+static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+{
+ struct device *dev = &priv->udev->dev;
+ const struct firmware *fw;
+ int ret = 0;
+ u16 signature;
+
+ dev_info(dev, "%s: Loading firmware %s\n", DRIVER_NAME, fw_name);
+ if (request_firmware(&fw, fw_name, &priv->udev->dev)) {
+ dev_warn(dev, "request_firmware(%s) failed\n", fw_name);
+ ret = -EAGAIN;
+ goto exit;
+ }
+ if (!fw) {
+ dev_warn(dev, "Firmware data not available\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+	if (!priv->fw_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header);
+
+ signature = le16_to_cpu(priv->fw_data->signature);
+ switch (signature & 0xfff0) {
+ case 0x92c0:
+ case 0x88c0:
+ case 0x2300:
+ break;
+ default:
+ ret = -EINVAL;
+ dev_warn(dev, "%s: Invalid firmware signature: 0x%04x\n",
+ __func__, signature);
+ }
+
+ dev_info(dev, "Firmware revision %i.%i (signature 0x%04x)\n",
+ le16_to_cpu(priv->fw_data->major_version),
+ priv->fw_data->minor_version, signature);
+
+exit:
+ release_firmware(fw);
+ return ret;
+}
+
+static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ switch (priv->chip_cut) {
+ case 0:
+ fw_name = "rtlwifi/rtl8723aufw_A.bin";
+ break;
+ case 1:
+ if (priv->enable_bluetooth)
+ fw_name = "rtlwifi/rtl8723aufw_B.bin";
+ else
+ fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+ return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ if (!priv->vendor_umc)
+ fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ else if (priv->chip_cut || priv->rtlchip == 0x8192c)
+ fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ else
+ fw_name = "rtlwifi/rtl8192cufw_A.bin";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+#endif
+
+static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+	int i;
+
+ /* Inform 8051 to perform reset */
+ rtl8xxxu_write8(priv, REG_HMTFR + 3, 0x20);
+
+ for (i = 100; i > 0; i--) {
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+
+ if (!(val16 & SYS_FUNC_CPU_ENABLE)) {
+ dev_dbg(&priv->udev->dev,
+ "%s: Firmware self reset success!\n", __func__);
+ break;
+ }
+ udelay(50);
+ }
+
+ if (!i) {
+ /* Force firmware reset */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+ }
+}
+
+static int
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+{
+ int i, ret;
+ u16 reg;
+ u8 val;
+
+ for (i = 0; ; i++) {
+ reg = array[i].reg;
+ val = array[i].val;
+
+ if (reg == 0xffff && val == 0xff)
+ break;
+
+ ret = rtl8xxxu_write8(priv, reg, val);
+ if (ret != 1) {
+ dev_warn(&priv->udev->dev,
+ "Failed to initialize MAC\n");
+ return -EAGAIN;
+ }
+ }
+
+ rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+
+ return 0;
+}
+
+static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_reg32val *array)
+{
+ int i, ret;
+ u16 reg;
+ u32 val;
+
+ for (i = 0; ; i++) {
+ reg = array[i].reg;
+ val = array[i].val;
+
+ if (reg == 0xffff && val == 0xffffffff)
+ break;
+
+ ret = rtl8xxxu_write32(priv, reg, val);
+ if (ret != sizeof(val)) {
+ dev_warn(&priv->udev->dev,
+ "Failed to initialize PHY\n");
+ return -EAGAIN;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
+ u32 val32;
+
+ /*
+ * Todo: The vendor driver maintains a table of PHY register
+ * addresses, which is initialized here. Do we need this?
+ */
+
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ udelay(2);
+ val8 |= AFE_PLL_320_ENABLE;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+ udelay(2);
+
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+ udelay(2);
+
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 &= ~AFE_XTAL_RF_GATE;
+ if (priv->has_bluetooth)
+ val32 &= ~AFE_XTAL_BT_GATE;
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+ /* 6. 0x1f[7:0] = 0x07 */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ if (priv->hi_pa)
+ rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
+ else if (priv->tx_paths == 2)
+ rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
+ else
+ rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
+
+ if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+ priv->vendor_umc && priv->chip_cut == 1)
+ rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
+
+ if (priv->tx_paths == 1 && priv->rx_paths == 2) {
+ /*
+ * For 1T2R boards, patch the registers.
+ *
+ * It looks like 8191/2 1T2R boards use path B for TX
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO);
+ val32 &= ~(BIT(0) | BIT(1));
+ val32 |= BIT(1);
+ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO);
+ val32 &= ~0x300033;
+ val32 |= 0x200022;
+ rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ val32 &= 0xff000000;
+ val32 |= 0x45000000;
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+ val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B |
+ OFDM_RF_PATH_TX_B);
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGC_PARM1);
+ val32 &= ~(BIT(4) | BIT(5));
+ val32 |= BIT(4);
+ rtl8xxxu_write32(priv, REG_OFDM0_AGC_PARM1, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_CCK_RFON);
+ val32 &= ~(BIT(27) | BIT(26));
+ val32 |= BIT(27);
+ rtl8xxxu_write32(priv, REG_TX_CCK_RFON, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_CCK_BBON);
+ val32 &= ~(BIT(27) | BIT(26));
+ val32 |= BIT(27);
+ rtl8xxxu_write32(priv, REG_TX_CCK_BBON, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_RFON);
+ val32 &= ~(BIT(27) | BIT(26));
+ val32 |= BIT(27);
+ rtl8xxxu_write32(priv, REG_TX_OFDM_RFON, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_BBON);
+ val32 &= ~(BIT(27) | BIT(26));
+ val32 |= BIT(27);
+ rtl8xxxu_write32(priv, REG_TX_OFDM_BBON, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_TO_TX);
+ val32 &= ~(BIT(27) | BIT(26));
+ val32 |= BIT(27);
+ rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
+ }
+
+ if (priv->hi_pa)
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+ else
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+ if (priv->rtlchip == 0x8723a &&
+ priv->efuse_wifi.efuse8723.version >= 0x01) {
+ val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
+
+ val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+ val32 &= 0xff000fff;
+ val32 |= ((val8 | (val8 << 6)) << 12);
+
+ rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
+ }
+
+ ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+ ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+ ldohci12 = 0x57;
+ lpldo = 1;
+ val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+
+ rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+
+ return 0;
+}
+
+static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rfregval *array,
+ enum rtl8xxxu_rfpath path)
+{
+ int i, ret;
+ u8 reg;
+ u32 val;
+
+ for (i = 0; ; i++) {
+ reg = array[i].reg;
+ val = array[i].val;
+
+ if (reg == 0xff && val == 0xffffffff)
+ break;
+
+ switch (reg) {
+ case 0xfe:
+ msleep(50);
+ continue;
+ case 0xfd:
+ mdelay(5);
+ continue;
+ case 0xfc:
+ mdelay(1);
+ continue;
+ case 0xfb:
+ udelay(50);
+ continue;
+ case 0xfa:
+ udelay(5);
+ continue;
+ case 0xf9:
+ udelay(1);
+ continue;
+ }
+
+ reg &= 0x3f;
+
+ ret = rtl8xxxu_write_rfreg(priv, path, reg, val);
+ if (ret) {
+ dev_warn(&priv->udev->dev,
+ "Failed to initialize RF\n");
+ return -EAGAIN;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rfregval *table,
+ enum rtl8xxxu_rfpath path)
+{
+ u32 val32;
+ u16 val16, rfsi_rfenv;
+ u16 reg_sw_ctrl, reg_int_oe, reg_hssi_parm2;
+
+ switch (path) {
+ case RF_A:
+ reg_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL;
+ reg_int_oe = REG_FPGA0_XA_RF_INT_OE;
+ reg_hssi_parm2 = REG_FPGA0_XA_HSSI_PARM2;
+ break;
+ case RF_B:
+ reg_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL;
+ reg_int_oe = REG_FPGA0_XB_RF_INT_OE;
+ reg_hssi_parm2 = REG_FPGA0_XB_HSSI_PARM2;
+ break;
+ default:
+		dev_err(&priv->udev->dev, "%s: Unsupported RF path %c\n",
+			__func__, path + 'A');
+ return -EINVAL;
+ }
+ /* For path B, use XB */
+ rfsi_rfenv = rtl8xxxu_read16(priv, reg_sw_ctrl);
+ rfsi_rfenv &= FPGA0_RF_RFENV;
+
+ /*
+ * These two we might be able to optimize into one
+ */
+ val32 = rtl8xxxu_read32(priv, reg_int_oe);
+ val32 |= BIT(20); /* 0x10 << 16 */
+ rtl8xxxu_write32(priv, reg_int_oe, val32);
+ udelay(1);
+
+ val32 = rtl8xxxu_read32(priv, reg_int_oe);
+ val32 |= BIT(4);
+ rtl8xxxu_write32(priv, reg_int_oe, val32);
+ udelay(1);
+
+ /*
+ * These two we might be able to optimize into one
+ */
+ val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+ val32 &= ~FPGA0_HSSI_3WIRE_ADDR_LEN;
+ rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+ udelay(1);
+
+ val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+ val32 &= ~FPGA0_HSSI_3WIRE_DATA_LEN;
+ rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+ udelay(1);
+
+ rtl8xxxu_init_rf_regs(priv, table, path);
+
+ /* For path B, use XB */
+ val16 = rtl8xxxu_read16(priv, reg_sw_ctrl);
+ val16 &= ~FPGA0_RF_RFENV;
+ val16 |= rfsi_rfenv;
+ rtl8xxxu_write16(priv, reg_sw_ctrl, val16);
+
+ return 0;
+}
+
+static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
+{
+ int ret = -EBUSY;
+ int count = 0;
+ u32 value;
+
+ value = LLT_OP_WRITE | address << 8 | data;
+
+ rtl8xxxu_write32(priv, REG_LLT_INIT, value);
+
+ do {
+ value = rtl8xxxu_read32(priv, REG_LLT_INIT);
+ if ((value & LLT_OP_MASK) == LLT_OP_INACTIVE) {
+ ret = 0;
+ break;
+ }
+ } while (count++ < 20);
+
+ return ret;
+}
+
+static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+{
+ int ret;
+ int i;
+
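+	/* Chain the TX buffer pages: each LLT entry points to the next page */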
+ for (i = 0; i < last_tx_page; i++) {
+ ret = rtl8xxxu_llt_write(priv, i, i + 1);
+ if (ret)
+ goto exit;
+ }
+
+ ret = rtl8xxxu_llt_write(priv, last_tx_page, 0xff);
+ if (ret)
+ goto exit;
+
+ /* Mark remaining pages as a ring buffer */
+ for (i = last_tx_page + 1; i < 0xff; i++) {
+ ret = rtl8xxxu_llt_write(priv, i, (i + 1));
+ if (ret)
+ goto exit;
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1);
+ if (ret)
+ goto exit;
+
+exit:
+ return ret;
+}
+
+static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
+{
+ u16 val16, hi, lo;
+ u16 hiq, mgq, bkq, beq, viq, voq;
+ int hip, mgp, bkp, bep, vip, vop;
+ int ret = 0;
+
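+	/*
+	 * Map each TX DMA queue onto one of the available USB out
+	 * endpoints, depending on how many TX endpoints the device has.
+	 */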
+ switch (priv->ep_tx_count) {
+ case 1:
+ if (priv->ep_tx_high_queue) {
+ hi = TRXDMA_QUEUE_HIGH;
+ } else if (priv->ep_tx_low_queue) {
+ hi = TRXDMA_QUEUE_LOW;
+ } else if (priv->ep_tx_normal_queue) {
+ hi = TRXDMA_QUEUE_NORMAL;
+ } else {
+ hi = 0;
+ ret = -EINVAL;
+ }
+
+ hiq = hi;
+ mgq = hi;
+ bkq = hi;
+ beq = hi;
+ viq = hi;
+ voq = hi;
+
+ hip = 0;
+ mgp = 0;
+ bkp = 0;
+ bep = 0;
+ vip = 0;
+ vop = 0;
+ break;
+ case 2:
+ if (priv->ep_tx_high_queue && priv->ep_tx_low_queue) {
+ hi = TRXDMA_QUEUE_HIGH;
+ lo = TRXDMA_QUEUE_LOW;
+ } else if (priv->ep_tx_normal_queue && priv->ep_tx_low_queue) {
+ hi = TRXDMA_QUEUE_NORMAL;
+ lo = TRXDMA_QUEUE_LOW;
+ } else if (priv->ep_tx_high_queue && priv->ep_tx_normal_queue) {
+ hi = TRXDMA_QUEUE_HIGH;
+ lo = TRXDMA_QUEUE_NORMAL;
+ } else {
+ ret = -EINVAL;
+ hi = 0;
+ lo = 0;
+ }
+
+ hiq = hi;
+ mgq = hi;
+ bkq = lo;
+ beq = lo;
+ viq = hi;
+ voq = hi;
+
+ hip = 0;
+ mgp = 0;
+ bkp = 1;
+ bep = 1;
+ vip = 0;
+ vop = 0;
+ break;
+ case 3:
+ beq = TRXDMA_QUEUE_LOW;
+ bkq = TRXDMA_QUEUE_LOW;
+ viq = TRXDMA_QUEUE_NORMAL;
+ voq = TRXDMA_QUEUE_HIGH;
+ mgq = TRXDMA_QUEUE_HIGH;
+ hiq = TRXDMA_QUEUE_HIGH;
+
+ hip = hiq ^ 3;
+ mgp = mgq ^ 3;
+ bkp = bkq ^ 3;
+ bep = beq ^ 3;
+ vip = viq ^ 3;
+		vop = voq ^ 3;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /*
+ * None of the vendor drivers are configuring the beacon
+ * queue here .... why?
+ */
+ if (!ret) {
+ val16 = rtl8xxxu_read16(priv, REG_TRXDMA_CTRL);
+ val16 &= 0x7;
+ val16 |= (voq << TRXDMA_CTRL_VOQ_SHIFT) |
+ (viq << TRXDMA_CTRL_VIQ_SHIFT) |
+ (beq << TRXDMA_CTRL_BEQ_SHIFT) |
+ (bkq << TRXDMA_CTRL_BKQ_SHIFT) |
+ (mgq << TRXDMA_CTRL_MGQ_SHIFT) |
+ (hiq << TRXDMA_CTRL_HIQ_SHIFT);
+ rtl8xxxu_write16(priv, REG_TRXDMA_CTRL, val16);
+
+ priv->pipe_out[TXDESC_QUEUE_VO] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[vop]);
+ priv->pipe_out[TXDESC_QUEUE_VI] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[vip]);
+ priv->pipe_out[TXDESC_QUEUE_BE] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[bep]);
+ priv->pipe_out[TXDESC_QUEUE_BK] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[bkp]);
+ priv->pipe_out[TXDESC_QUEUE_BEACON] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+ priv->pipe_out[TXDESC_QUEUE_MGNT] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[mgp]);
+ priv->pipe_out[TXDESC_QUEUE_HIGH] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[hip]);
+ priv->pipe_out[TXDESC_QUEUE_CMD] =
+ usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+ }
+
+ return ret;
+}
+
+static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
+ bool iqk_ok, int result[][8],
+ int candidate, bool tx_only)
+{
+ u32 oldval, x, tx0_a, reg;
+ int y, tx0_c;
+ u32 val32;
+
+ if (!iqk_ok)
+ return;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+ oldval = val32 >> 22;
+
+ x = result[candidate][0];
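+	/* Sign-extend the 10-bit IQK result */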
+ if ((x & 0x00000200) != 0)
+ x = x | 0xfffffc00;
+ tx0_a = (x * oldval) >> 8;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+ val32 &= ~0x3ff;
+ val32 |= tx0_a;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+ val32 &= ~BIT(31);
+ if ((x * oldval >> 7) & 0x1)
+ val32 |= BIT(31);
+ rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+ y = result[candidate][1];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xfffffc00;
+ tx0_c = (y * oldval) >> 8;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XC_TX_AFE);
+ val32 &= ~0xf0000000;
+ val32 |= (((tx0_c & 0x3c0) >> 6) << 28);
+ rtl8xxxu_write32(priv, REG_OFDM0_XC_TX_AFE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+ val32 &= ~0x003f0000;
+ val32 |= ((tx0_c & 0x3f) << 16);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+ val32 &= ~BIT(29);
+ if ((y * oldval >> 7) & 0x1)
+ val32 |= BIT(29);
+ rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+ if (tx_only) {
+ dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+ return;
+ }
+
+ reg = result[candidate][2];
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+ val32 &= ~0x3ff;
+ val32 |= (reg & 0x3ff);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+ reg = result[candidate][3] & 0x3F;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+ val32 &= ~0xfc00;
+ val32 |= ((reg << 10) & 0xfc00);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+ reg = (result[candidate][3] >> 6) & 0xF;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_IQ_EXT_ANTA);
+ val32 &= ~0xf0000000;
+ val32 |= (reg << 28);
+ rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
+}
+
+static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv,
+ bool iqk_ok, int result[][8],
+ int candidate, bool tx_only)
+{
+ u32 oldval, x, tx1_a, reg;
+ int y, tx1_c;
+ u32 val32;
+
+ if (!iqk_ok)
+ return;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+ oldval = val32 >> 22;
+
+ x = result[candidate][4];
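+	/* Sign-extend the 10-bit IQK result */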
+ if ((x & 0x00000200) != 0)
+ x = x | 0xfffffc00;
+ tx1_a = (x * oldval) >> 8;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+ val32 &= ~0x3ff;
+ val32 |= tx1_a;
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+ val32 &= ~BIT(27);
+ if ((x * oldval >> 7) & 0x1)
+ val32 |= BIT(27);
+ rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+ y = result[candidate][5];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xfffffc00;
+ tx1_c = (y * oldval) >> 8;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XD_TX_AFE);
+ val32 &= ~0xf0000000;
+ val32 |= (((tx1_c & 0x3c0) >> 6) << 28);
+ rtl8xxxu_write32(priv, REG_OFDM0_XD_TX_AFE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+ val32 &= ~0x003f0000;
+ val32 |= ((tx1_c & 0x3f) << 16);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+ val32 &= ~BIT(25);
+ if ((y * oldval >> 7) & 0x1)
+ val32 |= BIT(25);
+ rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+ if (tx_only) {
+ dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+ return;
+ }
+
+ reg = result[candidate][6];
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+ val32 &= ~0x3ff;
+ val32 |= (reg & 0x3ff);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+ reg = result[candidate][7] & 0x3f;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+ val32 &= ~0xfc00;
+ val32 |= ((reg << 10) & 0xfc00);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+ reg = (result[candidate][7] >> 6) & 0xf;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGCR_SSI_TABLE);
+ val32 &= ~0x0000f000;
+ val32 |= (reg << 12);
+ rtl8xxxu_write32(priv, REG_OFDM0_AGCR_SSI_TABLE, val32);
+}
+
+#define MAX_TOLERANCE 5
+
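+/*
+ * Compare two IQ calibration runs; indices 0-3 hold the path A TX/RX
+ * results, 4-7 those of path B. Entries differing by more than
+ * MAX_TOLERANCE are flagged in the bitmap.
+ */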
+static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2)
+{
+ u32 i, j, diff, simubitmap, bound = 0;
+ int candidate[2] = {-1, -1}; /* for path A and path B */
+ bool retval = true;
+
+ if (priv->tx_paths > 1)
+ bound = 8;
+ else
+ bound = 4;
+
+ simubitmap = 0;
+
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ?
+ (result[c1][i] - result[c2][i]) :
+ (result[c2][i] - result[c1][i]);
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !simubitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ candidate[(i / 4)] = c1;
+ else
+ simubitmap = simubitmap | (1 << i);
+ } else {
+ simubitmap = simubitmap | (1 << i);
+ }
+ }
+ }
+
+ if (simubitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (candidate[i] >= 0) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] = result[candidate[i]][j];
+ retval = false;
+ }
+ }
+ return retval;
+ } else if (!(simubitmap & 0x0f)) {
+ /* path A OK */
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(simubitmap & 0xf0) && priv->tx_paths > 1) {
+ /* path B OK */
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ return false;
+}
+
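+/*
+ * All but the last register in the list are saved and restored as
+ * 8-bit values; the final entry is handled as a 32-bit register.
+ */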
+static void
+rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
+{
+ int i;
+
+ for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+ backup[i] = rtl8xxxu_read8(priv, reg[i]);
+
+ backup[i] = rtl8xxxu_read32(priv, reg[i]);
+}
+
+static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup)
+{
+ int i;
+
+ for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+ rtl8xxxu_write8(priv, reg[i], backup[i]);
+
+ rtl8xxxu_write32(priv, reg[i], backup[i]);
+}
+
+static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ backup[i] = rtl8xxxu_read32(priv, regs[i]);
+}
+
+static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ rtl8xxxu_write32(priv, regs[i], backup[i]);
+}
+
+static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+ bool path_a_on)
+{
+ u32 path_on;
+ int i;
+
+ path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (priv->tx_paths == 1) {
+ path_on = 0x0bdb25a0;
+ rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0);
+ } else {
+ rtl8xxxu_write32(priv, regs[0], path_on);
+ }
+
+ for (i = 1 ; i < RTL8XXXU_ADDA_REGS ; i++)
+ rtl8xxxu_write32(priv, regs[i], path_on);
+}
+
+static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+ const u32 *regs, u32 *backup)
+{
+ int i = 0;
+
+ rtl8xxxu_write8(priv, regs[i], 0x3f);
+
+ for (i = 1 ; i < (RTL8XXXU_MAC_REGS - 1); i++)
+ rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(3)));
+
+ rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(5)));
+}
+
+static int rtl8xxxu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4, val32;
+ int result = 0;
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1f);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140102);
+
+ val32 = (priv->rf_paths > 1) ? 0x28160202 :
+ /*IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID)?0x28160202: */
+ 0x28160502;
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, val32);
+
+ /* path-B IQK setting */
+ if (priv->rf_paths > 1) {
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x10008c22);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x10008c22);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82140102);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28160202);
+ }
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x001028d1);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+ /* If TX is OK, check whether RX is OK */
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+ __func__);
+out:
+ return result;
+}
+
+static int rtl8xxxu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ int result = 0;
+
+ /* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+
+ mdelay(1);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+ reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+ if (!(reg_eac & BIT(31)) &&
+ ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+ ((reg_ebc & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else
+ goto out;
+
+ if (!(reg_eac & BIT(30)) &&
+ (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
+ (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+ __func__);
+out:
+ return result;
+}
+
+static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok, path_b_ok;
+ int retry = 2;
+ const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+ };
+
+ /*
+ * Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, radio_a.txt and radio_b.txt.
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ if (t == 0) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+ if (val32 & FPGA0_HSSI_PARM1_PI)
+ priv->pi_enabled = 1;
+ }
+
+ if (!priv->pi_enabled) {
+ /* Switch BB to PI mode to do IQ Calibration. */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE_CCK;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+ val32 &= ~BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+ val32 &= ~BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+ if (priv->tx_paths > 1) {
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000);
+ }
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Page B init */
+ rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000);
+
+ if (priv->tx_paths > 1)
+ rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x00080000);
+
+ /* IQ calibration setting */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8xxxu_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+ break;
+ } else if (i == (retry - 1) && path_a_ok == 0x01) {
+ /* TX IQK OK */
+ dev_dbg(dev, "%s: Path A IQK Only Tx Success!!\n",
+ __func__);
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A IQK failed!\n", __func__);
+
+ if (priv->tx_paths > 1) {
+ /*
+ * Path A into standby
+ */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x0);
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Turn Path B ADDA on */
+ rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8xxxu_iqk_path_b(priv);
+ if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ result[t][4] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ result[t][5] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+ result[t][6] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+ result[t][7] = (val32 >> 16) & 0x3ff;
+ break;
+ } else if (i == (retry - 1) && path_b_ok == 0x01) {
+ /* TX IQK OK */
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ result[t][4] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ result[t][5] = (val32 >> 16) & 0x3ff;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+ }
+
+ /* Back to BB mode, load original value */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0);
+
+ if (t) {
+ if (!priv->pi_enabled) {
+ /*
+ * Switch back BB to SI mode after finishing
+ * IQ Calibration
+ */
+ val32 = 0x01000000;
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32);
+ }
+
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3);
+
+ if (priv->tx_paths > 1) {
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM,
+ 0x00032ed3);
+ }
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+ }
+}
+
+static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok, path_b_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ s32 reg_tmp = 0;
+ bool simu;
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+ path_b_ok = false;
+
+ rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+
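+ /*
+ * Run up to three calibration passes and pick the first pair of
+ * passes that rtl8xxxu_simularity_compare() considers consistent.
+ * If no two passes agree, fall back to the merged values the
+ * comparison is expected to accumulate in result[3], provided
+ * they are non-zero.
+ */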
+ for (i = 0; i < 3; i++) {
+ rtl8xxxu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_simularity_compare(priv, result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_simularity_compare(priv, result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_simularity_compare(priv, result, 1, 2);
+ if (simu) {
+ candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp)
+ candidate = 3;
+ else
+ candidate = -1;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+ "ecc=%x\n ", __func__, reg_e94, reg_e9c,
+ reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ path_b_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ if (priv->tx_paths > 1 && reg_eb4)
+ rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+ candidate, (reg_ec4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u32 rf_amode, rf_bmode = 0, lstf;
+
+ /* Check continuous TX and Packet TX */
+ lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+
+ if (lstf & OFDM_LSTF_MASK) {
+ /* Disable all continuous TX */
+ val32 = lstf & ~OFDM_LSTF_MASK;
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+ /* Read original RF mode Path A */
+ rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_AC);
+
+ /* Set RF mode to standby Path A */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC,
+ (rf_amode & 0x8ffff) | 0x10000);
+
+ /* Path-B */
+ if (priv->tx_paths > 1) {
+ rf_bmode = rtl8xxxu_read_rfreg(priv, RF_B,
+ RF6052_REG_AC);
+
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+ (rf_bmode & 0x8ffff) | 0x10000);
+ }
+ } else {
+ /* Deal with Packet TX case */
+ /* block all queues */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+ }
+
+ /* Start LC calibration */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
+ val32 |= 0x08000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
+
+ msleep(100);
+
+ /* Restore original parameters */
+ if (lstf & OFDM_LSTF_MASK) {
+ /* Path-A */
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, rf_amode);
+
+ /* Path-B */
+ if (priv->tx_paths > 1)
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+ rf_bmode);
+ } else /* Deal with Packet TX case */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+{
+ int i;
+ u16 reg;
+
+ reg = REG_MACID;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+
+ return 0;
+}
+
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+{
+ int i;
+ u16 reg;
+
+ dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
+
+ reg = REG_BSSID;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtl8xxxu_write8(priv, reg + i, bssid[i]);
+
+ return 0;
+}
+
+static void
+rtl8xxxu_set_ampdu_factor(struct rtl8xxxu_priv *priv, u8 ampdu_factor)
+{
+ u8 vals[4] = { 0x41, 0xa8, 0x72, 0xb9 };
+ u8 max_agg = 0xf;
+ int i;
+
+ ampdu_factor = 1 << (ampdu_factor + 2);
+ if (ampdu_factor > max_agg)
+ ampdu_factor = max_agg;
+
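+ /*
+ * Each REG_AGGLEN_LMT byte holds two 4-bit aggregation limits;
+ * clamp both nibbles of the defaults to the requested factor.
+ * E.g. an input factor of 1 yields a limit of 8 (1 << 3), so any
+ * default nibble above 8 is lowered to 8.
+ */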
+ for (i = 0; i < 4; i++) {
+ if ((vals[i] & 0xf0) > (ampdu_factor << 4))
+ vals[i] = (vals[i] & 0x0f) | (ampdu_factor << 4);
+
+ if ((vals[i] & 0x0f) > ampdu_factor)
+ vals[i] = (vals[i] & 0xf0) | ampdu_factor;
+
+ rtl8xxxu_write8(priv, REG_AGGLEN_LMT + i, vals[i]);
+ }
+}
+
+static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
+{
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_AMPDU_MIN_SPACE);
+ val8 &= 0xf8;
+ val8 |= density;
+ rtl8xxxu_write8(priv, REG_AMPDU_MIN_SPACE, val8);
+}
+
+static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ int count, ret = 0;
+
+ /* Start of rtl8723AU_card_enable_flow */
+ /* Act to Cardemu sequence */
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* 0x004E[7] = 0, switch DPDT_SEL_P output from register 0x0065[2] */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+ /* 0x0005[1] = 1 turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 |= SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+ /* 0x0020[0] = 0 disable LDOA12 MACRO block */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 &= ~LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ /*
+ * Poll - wait for RX packet to complete
+ */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, 0x5f8);
+ if (!val32)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev,
+ "%s: RX poll timed out (0x05f8)\n", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Reset baseband */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* Reset MAC TRX */
+ val8 = rtl8xxxu_read8(priv, REG_CR);
+ val8 = CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE;
+ rtl8xxxu_write8(priv, REG_CR, val8);
+
+ /* Disable the security engine */
+ val8 = rtl8xxxu_read8(priv, REG_CR + 1);
+ val8 &= ~BIT(1); /* CR_SECURITY_ENABLE */
+ rtl8xxxu_write8(priv, REG_CR + 1, val8);
+
+ /* Respond TX OK to scheduler */
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+exit:
+ return ret;
+}
+
+static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* Clear suspend enable and power down enable */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(7));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* 0x48[16] = 0 to disable GPIO9 as EXT WAKEUP */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+ /* 0x04[12:11] = 00, disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ /* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 |= LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+ /* 0x67[4] = 0 to disable BT_GPS_SEL pin */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ mdelay(1);
+
+ /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+ /* disable SW LPS 0x04[10]= 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(2);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* wait till 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+ /* release WLON reset 0x04[16] = 1 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+ /* disable HWPDN 0x04[15] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
+ /*
+ * Note: The vendor driver actually clears this bit, despite the
+ * documentation claiming it should be set!
+ */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 |= LEDCFG2_DPDT_SELECT;
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* 0x0007[7:0] = 0x20 SOP option to disable BG/MB */
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x20);
+
+ /* 0x04[12:11] = 01 enable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(4);
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+ return 0;
+}
+
+static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ /*
+ * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+ */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+
+ rtl8xxxu_disabled_to_emu(priv);
+
+ ret = rtl8xxxu_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * 0x0004[19] = 1, reset 8051
+ */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /* For EFuse PG */
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+ val32 &= ~(BIT(28) | BIT(29) | BIT(30));
+ val32 |= (0x06 << 28);
+ rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
+exit:
+ return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int i;
+
+ for (i = 100; i; i--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
+ if (val8 & APS_FSMCO_PFM_ALDN)
+ break;
+ }
+
+ if (!i) {
+ pr_info("%s: Poll failed\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+ */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+ rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
+ udelay(100);
+
+ val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
+ if (!(val8 & LDOV12D_ENABLE)) {
+ pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
+ val8 |= LDOV12D_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
+
+ udelay(100);
+
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_MD2PP;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+ }
+
+ /*
+ * Auto enable WLAN
+ */
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ for (i = 1000; i; i--) {
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ if (!(val16 & APS_FSMCO_MAC_ENABLE))
+ break;
+ }
+ if (!i) {
+ pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Enable radio, GPIO, LED
+ */
+ val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
+ APS_FSMCO_PFM_ALDN;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /*
+ * Release RF digital isolation
+ */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+ val16 &= ~SYS_ISO_DIOR;
+ rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+ val8 &= ~APSD_CTRL_OFF;
+ rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
+ for (i = 200; i; i--) {
+ val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+ if (!(val8 & APSD_CTRL_OFF_STATUS))
+ break;
+ }
+
+ if (!i) {
+ pr_info("%s: APSD_CTRL poll failed\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
+ CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /*
+ * Workaround for 8188RU LNA power leakage problem.
+ */
+ if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+ val32 &= ~BIT(1);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+ }
+ return 0;
+}
+
+#endif
+
+static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+
+ /*
+ * Workaround for 8188RU LNA power leakage problem.
+ */
+ if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+ val32 |= BIT(1);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+ }
+
+ rtl8xxxu_active_to_lps(priv);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8xxxu_active_to_emu(priv);
+ rtl8xxxu_emu_to_disabled(priv);
+
+ /* Reset MCU IO Wrapper */
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ /* RSV_CTRL 0x1C[7:0] = 0x0e lock ISO/CLK/Power control register */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
+}
+
+static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv)
+{
+ if (!priv->has_bluetooth)
+ return;
+}
+
+static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_rfregval *rftable;
+ bool macpower;
+ int ret;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+
+ /* Check if MAC is already powered on */
+ val8 = rtl8xxxu_read8(priv, REG_CR);
+
+ /*
+ * Fix 92DU-VC S3 hang: the reason is that the secondary MAC is not
+ * initialized. The first MAC returns 0xea, the second returns 0x00.
+ */
+ if (val8 == 0xea)
+ macpower = false;
+ else
+ macpower = true;
+
+ ret = priv->fops->power_on(priv);
+ if (ret < 0) {
+ dev_warn(dev, "%s: Failed power on\n", __func__);
+ goto exit;
+ }
+
+ dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+ if (!macpower) {
+ ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM);
+ if (ret) {
+ dev_warn(dev, "%s: LLT table init failed\n", __func__);
+ goto exit;
+ }
+ }
+
+ ret = rtl8xxxu_download_firmware(priv);
+ dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
+ if (ret)
+ goto exit;
+ ret = rtl8xxxu_start_firmware(priv);
+ dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret);
+ if (ret)
+ goto exit;
+
+ ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+ dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
+ if (ret)
+ goto exit;
+
+ ret = rtl8xxxu_init_phy_bb(priv);
+ dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret);
+ if (ret)
+ goto exit;
+
+ switch (priv->rtlchip) {
+ case 0x8723a:
+ rftable = rtl8723au_radioa_1t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ break;
+ case 0x8188c:
+ if (priv->hi_pa)
+ rftable = rtl8188ru_radioa_1t_highpa_table;
+ else
+ rftable = rtl8192cu_radioa_1t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ break;
+ case 0x8191c:
+ rftable = rtl8192cu_radioa_1t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ break;
+ case 0x8192c:
+ rftable = rtl8192cu_radioa_2t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ if (ret)
+ break;
+ rftable = rtl8192cu_radiob_2t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto exit;
+
+ /* Reduce 80M spur */
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+ /* RFSW Control - clear bit 14 ?? */
+ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
+ /* 0x07000760 */
+ val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+ FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
+ ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
+ FPGA0_RF_BD_CTRL_SHIFT);
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ /* 0x860[6:5]= 00 - why? - this sets antenna B */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+
+ priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
+ RF6052_REG_MODE_AG);
+
+ dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+ if (!macpower) {
+ if (priv->ep_tx_normal_queue)
+ val8 = TX_PAGE_NUM_NORM_PQ;
+ else
+ val8 = 0;
+
+ rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
+
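+ /*
+ * Partition the TX packet buffer pages between the public queue
+ * and the high/low priority queues; RQPN_LOAD latches the new
+ * page counts into the hardware.
+ */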
+ val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
+
+ if (priv->ep_tx_high_queue)
+ val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+ if (priv->ep_tx_low_queue)
+ val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
+
+ rtl8xxxu_write32(priv, REG_RQPN, val32);
+
+ /*
+ * Set TX buffer boundary
+ */
+ val8 = TX_TOTAL_PAGE_NUM + 1;
+ rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
+ rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
+ rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
+ rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8);
+ rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
+ }
+
+ ret = rtl8xxxu_init_queue_priority(priv);
+ dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+ if (ret)
+ goto exit;
+
+ /*
+ * Set RX page boundary
+ */
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
+ /*
+ * Transfer page size is always 128
+ */
+ val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
+ (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
+ rtl8xxxu_write8(priv, REG_PBP, val8);
+
+ /*
+ * Unit in 8 bytes, not obvious what it is used for
+ */
+ rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
+
+ /*
+ * Enable all interrupts - not obvious USB needs to do this
+ */
+ rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+ rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+
+ rtl8xxxu_set_mac(priv);
+ rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
+
+ /*
+ * Configure initial WMAC settings
+ */
+ val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST |
+ /* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */
+ RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
+ RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
+ rtl8xxxu_write32(priv, REG_RCR, val32);
+
+ /*
+ * Accept all multicast
+ */
+ rtl8xxxu_write32(priv, REG_MAR, 0xffffffff);
+ rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff);
+
+ /*
+ * Init adaptive controls
+ */
+ val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+ val32 |= RESPONSE_RATE_RRSR_CCK_ONLY_1M;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+ /* CCK = 0x0a, OFDM = 0x10 */
+ rtl8xxxu_set_spec_sifs(priv, 0x10, 0x10);
+ rtl8xxxu_set_retry(priv, 0x30, 0x30);
+ rtl8xxxu_set_spec_sifs(priv, 0x0a, 0x10);
+
+ /*
+ * Init EDCA
+ */
+ rtl8xxxu_write16(priv, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set CCK SIFS */
+ rtl8xxxu_write16(priv, REG_SIFS_CCK, 0x100a);
+
+ /* Set OFDM SIFS */
+ rtl8xxxu_write16(priv, REG_SIFS_OFDM, 0x100a);
+
+ /* TXOP */
+ rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, 0x005ea42b);
+ rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, 0x0000a44f);
+ rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, 0x005ea324);
+ rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, 0x002fa226);
+
+ /* Set data auto rate fallback retry count */
+ rtl8xxxu_write32(priv, REG_DARFRC, 0x00000000);
+ rtl8xxxu_write32(priv, REG_DARFRC + 4, 0x10080404);
+ rtl8xxxu_write32(priv, REG_RARFRC, 0x04030201);
+ rtl8xxxu_write32(priv, REG_RARFRC + 4, 0x08070605);
+
+ val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL);
+ val8 |= FWHW_TXQ_CTRL_AMPDU_RETRY;
+ rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL, val8);
+
+ /* Set ACK timeout */
+ rtl8xxxu_write8(priv, REG_ACKTO, 0x40);
+
+ /*
+ * Initialize beacon parameters
+ */
+ val16 = BEACON_DISABLE_TSF_UPDATE | (BEACON_DISABLE_TSF_UPDATE << 8);
+ rtl8xxxu_write16(priv, REG_BEACON_CTRL, val16);
+ rtl8xxxu_write16(priv, REG_TBTT_PROHIBIT, 0x6404);
+ rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME);
+ rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME);
+ rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F);
+
+ /*
+ * Enable CCK and OFDM block
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= (FPGA_RF_MODE_CCK | FPGA_RF_MODE_OFDM);
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ /*
+ * Invalidate all CAM entries - bit 30 is undocumented
+ */
+ rtl8xxxu_write32(priv, REG_CAM_CMD, CAM_CMD_POLLING | BIT(30));
+
+ /*
+ * Start out with default power levels for channel 6, 20MHz
+ */
+ rtl8723a_set_tx_power(priv, 1, false);
+
+ /* Let the 8051 take control of antenna setting */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 |= LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+ rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
+
+ /* Disable BAR - not sure if this has any effect on USB */
+ rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
+
+ /*
+ * Not sure if we should get into this at all
+ */
+ if (priv->iqk_initialized) {
+ rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup,
+ RTL8XXXU_BB_REGS);
+ } else {
+ rtl8723a_phy_iq_calibrate(priv);
+ priv->iqk_initialized = true;
+ }
+
+ /*
+ * This should enable thermal meter
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
+
+ rtl8723a_phy_lc_calibrate(priv);
+
+ /* fix USB interface interference issue */
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+ /* Solve too many protocol errors on the USB bus */
+ /* Can't do this for 8188/8192 UMC A cut parts */
+ rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+ rtl8xxxu_write8(priv, 0xfe41, 0x94);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x19);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+ rtl8xxxu_write8(priv, 0xfe41, 0x91);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+ rtl8xxxu_write8(priv, 0xfe41, 0x81);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ /* Init BT hw config. */
+ rtl8xxxu_init_bt(priv);
+
+ /*
+ * Not sure if we really need to save these parameters, but the
+ * vendor driver does
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+ if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)
+ priv->path_a_hi_power = 1;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK;
+
+ /* Set NAV_UPPER to 30000us, rounded up to a whole NAV_UPPER_UNIT */
+ val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
+ rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
+
+ /*
+ * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
+ * but we need to find the root cause.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ if ((val32 & 0xff000000) != 0x83000000) {
+ val32 |= FPGA_RF_MODE_CCK;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
+ val32 |= FWHW_TXQ_CTRL_XMIT_MGMT_ACK;
+ /* ack for xmit mgmt frames. */
+ rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
+
+exit:
+ return ret;
+}
+
+static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ rtl8xxxu_power_off(priv);
+}
+
+static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
+ struct ieee80211_key_conf *key, const u8 *mac)
+{
+ u32 cmd, val32, addr, ctrl;
+ int j, i, tmp_debug;
+
+ tmp_debug = rtl8xxxu_debug;
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_KEY)
+ rtl8xxxu_debug |= RTL8XXXU_DEBUG_REG_WRITE;
+
+ /*
+ * This is a bit of a hack - the lower bits of the cipher
+ * suite selector happen to match the cipher index in the CAM
+ */
+ addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+ ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+
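+ /*
+ * A CAM entry is written as a sequence of 32-bit words: word 0
+ * carries the control field plus the first two MAC octets, word 1
+ * the remaining four MAC octets, and words 2-5 the key material.
+ */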
+ for (j = 5; j >= 0; j--) {
+ switch (j) {
+ case 0:
+ val32 = ctrl | (mac[0] << 16) | (mac[1] << 24);
+ break;
+ case 1:
+ val32 = mac[2] | (mac[3] << 8) |
+ (mac[4] << 16) | (mac[5] << 24);
+ break;
+ default:
+ i = (j - 2) << 2;
+ val32 = key->key[i] | (key->key[i + 1] << 8) |
+ key->key[i + 2] << 16 | key->key[i + 3] << 24;
+ break;
+ }
+
+ rtl8xxxu_write32(priv, REG_CAM_WRITE, val32);
+ cmd = CAM_CMD_POLLING | CAM_CMD_WRITE | (addr + j);
+ rtl8xxxu_write32(priv, REG_CAM_CMD, cmd);
+ udelay(100);
+ }
+
+ rtl8xxxu_debug = tmp_debug;
+}
+
+static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, const u8 *mac)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
+static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 &= ~BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
+static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi)
+{
+ struct h2c_cmd h2c;
+
+ h2c.ramask.cmd = H2C_SET_RATE_MASK;
+ h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff);
+ h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16);
+
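+ /*
+ * Bit 5 of the arg byte requests short GI; bit 7 (0x80) appears to
+ * mark the rate mask entry as valid for the firmware.
+ */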
+ h2c.ramask.arg = 0x80;
+ if (sgi)
+ h2c.ramask.arg |= 0x20;
+
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__,
+ ramask, h2c.ramask.arg);
+ rtl8723a_h2c_cmd(priv, &h2c);
+}
+
+static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
+{
+ u32 val32;
+ u8 rate_idx = 0;
+
+ rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
+
+ val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+ val32 |= rate_cfg;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+ dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg);
+
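+ /*
+ * Derive the bit position of the highest configured rate (an
+ * open-coded fls()) and use it as the initial RTS rate index.
+ */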
+ while (rate_cfg) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_idx++;
+ }
+ rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
+}
+
+static void
+rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changed)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ struct ieee80211_sta *sta;
+ u32 val32;
+ u8 val8;
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ struct h2c_cmd h2c;
+
+ dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ rtl8xxxu_set_linktype(priv, vif->type);
+
+ if (bss_conf->assoc) {
+ u32 ramask;
+ int sgi = 0;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ dev_info(dev, "%s: ASSOC no sta found\n",
+ __func__);
+ rcu_read_unlock();
+ goto error;
+ }
+
+ if (sta->ht_cap.ht_supported)
+ dev_info(dev, "%s: HT supported\n", __func__);
+ if (sta->vht_cap.vht_supported)
+ dev_info(dev, "%s: VHT supported\n", __func__);
+
+ /* TODO: Set bits 28-31 for rate adaptive id */
+ ramask = (sta->supp_rates[0] & 0xfff) |
+ sta->ht_cap.mcs.rx_mask[0] << 12 |
+ sta->ht_cap.mcs.rx_mask[1] << 20;
+ if (sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+ sgi = 1;
+ rcu_read_unlock();
+
+ rtl8xxxu_update_rate_mask(priv, ramask, sgi);
+
+ val32 = rtl8xxxu_read32(priv, REG_RCR);
+ val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON;
+ rtl8xxxu_write32(priv, REG_RCR, val32);
+
+ /* Enable RX of data frames */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
+
+ rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
+
+ rtl8723a_stop_tx_beacon(priv);
+
+ /* joinbss sequence */
+ rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
+ 0xc000 | bss_conf->aid);
+
+ h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
+ } else {
+ val32 = rtl8xxxu_read32(priv, REG_RCR);
+ val32 &= ~(RCR_CHECK_BSSID_MATCH |
+ RCR_CHECK_BSSID_BEACON);
+ rtl8xxxu_write32(priv, REG_RCR, val32);
+
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+
+ /* Disable RX of data frames */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+ h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
+ }
+ h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
+ rtl8723a_h2c_cmd(priv, &h2c);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ dev_dbg(dev, "Changed ERP_PREAMBLE: Use short preamble %i\n",
+ bss_conf->use_short_preamble);
+ val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ if (bss_conf->use_short_preamble)
+ val32 |= RSR_ACK_SHORT_PREAMBLE;
+ else
+ val32 &= ~RSR_ACK_SHORT_PREAMBLE;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ dev_dbg(dev, "Changed ERP_SLOT: short_slot_time %i\n",
+ bss_conf->use_short_slot);
+
+ if (bss_conf->use_short_slot)
+ val8 = 9;
+ else
+ val8 = 20;
+ rtl8xxxu_write8(priv, REG_SLOT, val8);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ dev_dbg(dev, "Changed BSSID!\n");
+ rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ dev_dbg(dev, "Changed BASIC_RATES!\n");
+ rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates);
+ }
+error:
+ return;
+}
+
+static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
+{
+ u32 rtlqueue;
+
+ switch (queue) {
+ case IEEE80211_AC_VO:
+ rtlqueue = TXDESC_QUEUE_VO;
+ break;
+ case IEEE80211_AC_VI:
+ rtlqueue = TXDESC_QUEUE_VI;
+ break;
+ case IEEE80211_AC_BE:
+ rtlqueue = TXDESC_QUEUE_BE;
+ break;
+ case IEEE80211_AC_BK:
+ rtlqueue = TXDESC_QUEUE_BK;
+ break;
+ default:
+ rtlqueue = TXDESC_QUEUE_BE;
+ }
+
+ return rtlqueue;
+}
+
+static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u32 queue;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ queue = TXDESC_QUEUE_MGNT;
+ else
+ queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
+
+ return queue;
+}
+
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc)
+{
+ __le16 *ptr = (__le16 *)tx_desc;
+ u16 csum = 0;
+ int i;
+
+ /*
+ * Clear csum field before calculation, as the csum field is
+ * in the middle of the struct.
+ */
+ tx_desc->csum = cpu_to_le16(0);
+
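+ /*
+ * The checksum is the XOR of all 16-bit words of the descriptor,
+ * computed in CPU byte order.
+ */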
+ for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++)
+ csum ^= le16_to_cpu(ptr[i]);
+
+ tx_desc->csum |= cpu_to_le16(csum);
+}
+
+static void rtl8xxxu_free_tx_resources(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8xxxu_tx_urb *tx_urb, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->tx_urb_lock, flags);
+ list_for_each_entry_safe(tx_urb, tmp, &priv->tx_urb_free_list, list) {
+ list_del(&tx_urb->list);
+ priv->tx_urb_free_count--;
+ usb_free_urb(&tx_urb->urb);
+ }
+ spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static struct rtl8xxxu_tx_urb *
+rtl8xxxu_alloc_tx_urb(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8xxxu_tx_urb *tx_urb;
+ unsigned long flags;
+
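+ /*
+ * Stop the mac80211 queues while the pool of free TX URBs runs low;
+ * rtl8xxxu_free_tx_urb() wakes them again once the count climbs
+ * back above the high watermark, giving simple hysteresis.
+ */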
+ spin_lock_irqsave(&priv->tx_urb_lock, flags);
+ tx_urb = list_first_entry_or_null(&priv->tx_urb_free_list,
+ struct rtl8xxxu_tx_urb, list);
+ if (tx_urb) {
+ list_del(&tx_urb->list);
+ priv->tx_urb_free_count--;
+ if (priv->tx_urb_free_count < RTL8XXXU_TX_URB_LOW_WATER &&
+ !priv->tx_stopped) {
+ priv->tx_stopped = true;
+ ieee80211_stop_queues(priv->hw);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+
+ return tx_urb;
+}
+
+static void rtl8xxxu_free_tx_urb(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_tx_urb *tx_urb)
+{
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&tx_urb->list);
+
+ spin_lock_irqsave(&priv->tx_urb_lock, flags);
+
+ list_add(&tx_urb->list, &priv->tx_urb_free_list);
+ priv->tx_urb_free_count++;
+ if (priv->tx_urb_free_count > RTL8XXXU_TX_URB_HIGH_WATER &&
+ priv->tx_stopped) {
+ priv->tx_stopped = false;
+ ieee80211_wake_queues(priv->hw);
+ }
+
+ spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static void rtl8xxxu_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_hw *hw;
+ struct rtl8xxxu_tx_urb *tx_urb =
+ container_of(urb, struct rtl8xxxu_tx_urb, urb);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ hw = tx_info->rate_driver_data[0];
+
+ skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+ ieee80211_tx_info_clear_status(tx_info);
+ tx_info->status.rates[0].idx = -1;
+ tx_info->status.rates[0].count = 0;
+
+ if (!urb->status)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(hw, skb);
+
+ rtl8xxxu_free_tx_urb(hw->priv, tx_urb);
+}
+
+static void rtl8xxxu_dump_action(struct device *dev,
+ struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+ u16 cap, timeout;
+
+ if (!(rtl8xxxu_debug & RTL8XXXU_DEBUG_ACTION))
+ return;
+
+ switch (mgmt->u.action.u.addba_resp.action_code) {
+ case WLAN_ACTION_ADDBA_RESP:
+ cap = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+ timeout = le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
+ dev_info(dev, "WLAN_ACTION_ADDBA_RESP: "
+ "timeout %i, tid %02x, buf_size %02x, policy %02x, "
+ "status %02x\n",
+ timeout,
+ (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+ (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+ (cap >> 1) & 0x1,
+ le16_to_cpu(mgmt->u.action.u.addba_resp.status));
+ break;
+ case WLAN_ACTION_ADDBA_REQ:
+ cap = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+ timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+ dev_info(dev, "WLAN_ACTION_ADDBA_REQ: "
+ "timeout %i, tid %02x, buf_size %02x, policy %02x\n",
+ timeout,
+ (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+ (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+ (cap >> 1) & 0x1);
+ break;
+ default:
+ dev_info(dev, "action frame %02x\n",
+ mgmt->u.action.u.addba_resp.action_code);
+ break;
+ }
+}
+
+static void rtl8xxxu_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct rtl8xxxu_tx_desc *tx_desc;
+ struct rtl8xxxu_tx_urb *tx_urb;
+ struct ieee80211_sta *sta = NULL;
+ struct ieee80211_vif *vif = tx_info->control.vif;
+ struct device *dev = &priv->udev->dev;
+ u32 queue, rate;
+ u16 pktlen = skb->len;
+ u16 seq_number;
+ u16 rate_flag = tx_info->control.rates[0].flags;
+ int ret;
+
+ if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) {
+ dev_warn(dev,
+ "%s: Not enough headroom (%i) for tx descriptor\n",
+ __func__, skb_headroom(skb));
+ goto error;
+ }
+
+ if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) {
+ dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n",
+ __func__, skb->len);
+ goto error;
+ }
+
+ tx_urb = rtl8xxxu_alloc_tx_urb(priv);
+ if (!tx_urb) {
+ dev_warn(dev, "%s: Unable to allocate tx urb\n", __func__);
+ goto error;
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+ dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n",
+ __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen);
+
+ if (ieee80211_is_action(hdr->frame_control))
+ rtl8xxxu_dump_action(dev, hdr);
+
+ tx_info->rate_driver_data[0] = hw;
+
+ if (control && control->sta)
+ sta = control->sta;
+
+ tx_desc = (struct rtl8xxxu_tx_desc *)
+ skb_push(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+ memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc));
+ tx_desc->pkt_size = cpu_to_le16(pktlen);
+ tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc);
+
+ tx_desc->txdw0 =
+ TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+
+ queue = rtl8xxxu_queue_select(hw, skb);
+ tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+
+ if (tx_info->control.hw_key) {
+ switch (tx_info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_RC4);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_AES);
+ break;
+ default:
+ break;
+ }
+ }
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT);
+
+ if (rate_flag & IEEE80211_TX_RC_MCS)
+ rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+ else
+ rate = tx_rate->hw_value;
+ tx_desc->txdw5 = cpu_to_le32(rate);
+
+ if (ieee80211_is_data(hdr->frame_control))
+ tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+ /* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
+ if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
+ if (sta->ht_cap.ht_supported) {
+ u32 ampdu, val32;
+
+ ampdu = (u32)sta->ht_cap.ampdu_density;
+ val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
+ tx_desc->txdw2 |= cpu_to_le32(val32);
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE);
+ } else
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+ } else
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS);
+ if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+ (sta && vif && vif->bss_conf.use_short_preamble))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE);
+ if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+ (ieee80211_is_data_qos(hdr->frame_control) &&
+ sta && sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+ }
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE);
+ tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE);
+ }
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /* Use RTS rate 24M - does mac80211 tell us which to use? */
+ tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE);
+ }
+
+ rtl8xxxu_calc_tx_desc_csum(tx_desc);
+
+ usb_fill_bulk_urb(&tx_urb->urb, priv->udev, priv->pipe_out[queue],
+ skb->data, skb->len, rtl8xxxu_tx_complete, skb);
+
+ usb_anchor_urb(&tx_urb->urb, &priv->tx_anchor);
+ ret = usb_submit_urb(&tx_urb->urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(&tx_urb->urb);
+ rtl8xxxu_free_tx_urb(priv, tx_urb);
+ goto error;
+ }
+ return;
+error:
+ dev_kfree_skb(skb);
+}
+
+static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
+ struct ieee80211_rx_status *rx_status,
+ struct rtl8xxxu_rx_desc *rx_desc,
+ struct rtl8723au_phy_stats *phy_stats)
+{
+ if (phy_stats->sgi_en)
+ rx_status->flag |= RX_FLAG_SHORT_GI;
+
+ if (rx_desc->rxmcs < DESC_RATE_6M) {
+ /*
+ * Handle PHY stats for CCK rates
+ */
+ u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a;
+
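+ /*
+ * The top two bits of the CCK AGC report select the LNA gain
+ * range and the remaining bits give the attenuation within it;
+ * the offsets below follow the vendor driver's mapping.
+ */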
+ switch (cck_agc_rpt & 0xc0) {
+ case 0xc0:
+ rx_status->signal = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x80:
+ rx_status->signal = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x40:
+ rx_status->signal = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x00:
+ rx_status->signal = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ rx_status->signal =
+ (phy_stats->cck_sig_qual_ofdm_pwdb_all >> 1) - 110;
+ }
+}
+
+static void rtl8xxxu_free_rx_resources(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+ list_for_each_entry_safe(rx_urb, tmp,
+ &priv->rx_urb_pending_list, list) {
+ list_del(&rx_urb->list);
+ priv->rx_urb_pending_count--;
+ usb_free_urb(&rx_urb->urb);
+ }
+
+ spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+}
+
+static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rx_urb *rx_urb)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ int pending = 0;
+
+ spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+ if (!priv->shutdown) {
+ list_add_tail(&rx_urb->list, &priv->rx_urb_pending_list);
+ priv->rx_urb_pending_count++;
+ pending = priv->rx_urb_pending_count;
+ } else {
+ skb = (struct sk_buff *)rx_urb->urb.context;
+ dev_kfree_skb(skb);
+ usb_free_urb(&rx_urb->urb);
+ }
+
+ spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+ if (pending > RTL8XXXU_RX_URB_PENDING_WATER)
+ schedule_work(&priv->rx_urb_wq);
+}
+
+static void rtl8xxxu_rx_urb_work(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv;
+ struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+ struct list_head local;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret;
+
+ priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq);
+ INIT_LIST_HEAD(&local);
+
+ spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+ list_splice_init(&priv->rx_urb_pending_list, &local);
+ priv->rx_urb_pending_count = 0;
+
+ spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+ list_for_each_entry_safe(rx_urb, tmp, &local, list) {
+ list_del_init(&rx_urb->list);
+ ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+ /*
+ * If out of memory or temporary error, put it back on the
+ * queue and try again. Otherwise the device is dead/gone
+ * and we should drop it.
+ */
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMEM:
+ case -EAGAIN:
+ rtl8xxxu_queue_rx_urb(priv, rx_urb);
+ break;
+ default:
+ pr_info("failed to requeue urb %i\n", ret);
+ skb = (struct sk_buff *)rx_urb->urb.context;
+ dev_kfree_skb(skb);
+ usb_free_urb(&rx_urb->urb);
+ }
+ }
+}
+
+static void rtl8xxxu_rx_complete(struct urb *urb)
+{
+ struct rtl8xxxu_rx_urb *rx_urb =
+ container_of(urb, struct rtl8xxxu_rx_urb, urb);
+ struct ieee80211_hw *hw = rx_urb->hw;
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+ struct rtl8723au_phy_stats *phy_stats;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_mgmt *mgmt;
+ struct device *dev = &priv->udev->dev;
+ __le32 *_rx_desc_le = (__le32 *)skb->data;
+ u32 *_rx_desc = (u32 *)skb->data;
+ int cnt, len, drvinfo_sz, desc_shift, i;
+
+ for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+ cnt = rx_desc->frag;
+ len = rx_desc->pktlen;
+ drvinfo_sz = rx_desc->drvinfo_sz * 8;
+ desc_shift = rx_desc->shift;
+ skb_put(skb, urb->actual_length);
+
+ if (urb->status == 0) {
+ skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+ skb_pull(skb, drvinfo_sz + desc_shift);
+
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ if (rx_desc->phy_stats)
+ rtl8xxxu_rx_parse_phystats(priv, rx_status,
+ rx_desc, phy_stats);
+
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+
+ /* rx_desc was already converted to CPU order above */
+ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (rx_desc->rxht) {
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
+
+ ieee80211_rx_irqsafe(hw, skb);
+ skb = NULL;
+ rx_urb->urb.context = NULL;
+ rtl8xxxu_queue_rx_urb(priv, rx_urb);
+ } else {
+ dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+ goto cleanup;
+ }
+ return;
+
+cleanup:
+ usb_free_urb(urb);
+ dev_kfree_skb(skb);
+ return;
+}
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rx_urb *rx_urb)
+{
+ struct sk_buff *skb;
+ int skb_size;
+ int ret;
+
+ skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+ skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+ usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
+ skb_size, rtl8xxxu_rx_complete, skb);
+ usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
+ ret = usb_submit_urb(&rx_urb->urb, GFP_ATOMIC);
+ if (ret)
+ usb_unanchor_urb(&rx_urb->urb);
+ return ret;
+}
+
+static void rtl8xxxu_int_complete(struct urb *urb)
+{
+ struct rtl8xxxu_priv *priv = (struct rtl8xxxu_priv *)urb->context;
+ struct device *dev = &priv->udev->dev;
+ int ret;
+
+ dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+ if (urb->status == 0) {
+ usb_anchor_urb(urb, &priv->int_anchor);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ usb_unanchor_urb(urb);
+ } else {
+ dev_info(dev, "%s: Error %i\n", __func__, urb->status);
+ }
+}
+
+static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct urb *urb;
+ u32 val32;
+ int ret;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ usb_fill_int_urb(urb, priv->udev, priv->pipe_interrupt,
+ priv->int_buf, USB_INTR_CONTENT_LENGTH,
+ rtl8xxxu_int_complete, priv, 1);
+ usb_anchor_urb(urb, &priv->int_anchor);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto error;
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_USB_HIMR);
+ val32 |= USB_HIMR_CPWM;
+ rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+
+error:
+ return ret;
+}
+
+static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ int ret;
+ u8 val8;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rtl8723a_stop_tx_beacon(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+ BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ rtl8xxxu_set_linktype(priv, vif->type);
+
+ return ret;
+}
+
+static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ dev_dbg(&priv->udev->dev, "%s\n", __func__);
+}
+
+static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ u16 val16;
+ int ret = 0, channel;
+ bool ht40;
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+ dev_info(dev,
+ "%s: channel: %i (changed %08x chandef.width %02x)\n",
+ __func__, hw->conf.chandef.chan->hw_value,
+ changed, hw->conf.chandef.width);
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ val16 = ((hw->conf.long_frame_max_tx_count <<
+ RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
+ ((hw->conf.short_frame_max_tx_count <<
+ RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
+ rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ ht40 = false;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ ht40 = true;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto exit;
+ }
+
+ channel = hw->conf.chandef.chan->hw_value;
+
+ rtl8723a_set_tx_power(priv, channel, ht40);
+
+ rtl8723au_config_channel(hw);
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ u32 val32;
+ u8 aifs, acm_ctrl, acm_bit;
+
+ aifs = param->aifs;
+
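+ /*
+ * Pack the EDCA parameter word: AIFS in the low byte, the ECWmin
+ * and ECWmax exponents (fls() of the 2^n - 1 contention windows),
+ * and the TXOP limit in the top half.
+ */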
+ val32 = aifs |
+ fls(param->cw_min) << EDCA_PARAM_ECW_MIN_SHIFT |
+ fls(param->cw_max) << EDCA_PARAM_ECW_MAX_SHIFT |
+ (u32)param->txop << EDCA_PARAM_TXOP_SHIFT;
+
+ acm_ctrl = rtl8xxxu_read8(priv, REG_ACM_HW_CTRL);
+ dev_dbg(dev,
+ "%s: IEEE80211 queue %02x val %08x, acm %i, acm_ctrl %02x\n",
+ __func__, queue, val32, param->acm, acm_ctrl);
+
+ switch (queue) {
+ case IEEE80211_AC_VO:
+ acm_bit = ACM_HW_CTRL_VO;
+ rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, val32);
+ break;
+ case IEEE80211_AC_VI:
+ acm_bit = ACM_HW_CTRL_VI;
+ rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, val32);
+ break;
+ case IEEE80211_AC_BE:
+ acm_bit = ACM_HW_CTRL_BE;
+ rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, val32);
+ break;
+ case IEEE80211_AC_BK:
+ acm_bit = ACM_HW_CTRL_BK;
+ rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, val32);
+ break;
+ default:
+ acm_bit = 0;
+ break;
+ }
+
+ if (param->acm)
+ acm_ctrl |= acm_bit;
+ else
+ acm_ctrl &= ~acm_bit;
+ rtl8xxxu_write8(priv, REG_ACM_HW_CTRL, acm_ctrl);
+
+ return 0;
+}
+
+static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
+ __func__, changed_flags, *total_flags);
+
+ *total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC);
+}
+
+static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
+{
+ if (rts > 2347)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ u8 mac_addr[ETH_ALEN];
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retval = -EOPNOTSUPP;
+
+ dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
+ __func__, cmd, key->cipher, key->keyidx);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (key->keyidx > 3)
+ return -EOPNOTSUPP;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through - TKIP is not handled here and is rejected */
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ dev_dbg(dev, "%s: pairwise key\n", __func__);
+ ether_addr_copy(mac_addr, sta->addr);
+ } else {
+ dev_dbg(dev, "%s: group key\n", __func__);
+ eth_broadcast_addr(mac_addr);
+ }
+
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= CR_SECURITY_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val8 = SEC_CFG_TX_SEC_ENABLE | SEC_CFG_TXBC_USE_DEFKEY |
+ SEC_CFG_RX_SEC_ENABLE | SEC_CFG_RXBC_USE_DEFKEY;
+ val8 |= SEC_CFG_TX_USE_DEFKEY | SEC_CFG_RX_USE_DEFKEY;
+ rtl8xxxu_write8(priv, REG_SECURITY_CFG, val8);
+
+ switch (cmd) {
+ case SET_KEY:
+ key->hw_key_idx = key->keyidx;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ rtl8xxxu_cam_write(priv, key, mac_addr);
+ retval = 0;
+ break;
+ case DISABLE_KEY:
+ rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
+ val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
+ key->keyidx << CAM_CMD_KEY_SHIFT;
+ rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+ retval = 0;
+ break;
+ default:
+ dev_warn(dev, "%s: Unsupported command %02x\n", __func__, cmd);
+ }
+
+ return retval;
+}
+
+static int
+rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
+ bool amsdu)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ u8 ampdu_factor, ampdu_density;
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+ ampdu_factor = sta->ht_cap.ampdu_factor;
+ ampdu_density = sta->ht_cap.ampdu_density;
+ rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
+ rtl8xxxu_set_ampdu_min_space(priv, ampdu_density);
+ dev_dbg(dev,
+ "Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
+ ampdu_factor, ampdu_density);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+ rtl8xxxu_set_ampdu_factor(priv, 0);
+ rtl8xxxu_set_ampdu_min_space(priv, 0);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+ __func__);
+ rtl8xxxu_set_ampdu_factor(priv, 0);
+ rtl8xxxu_set_ampdu_min_space(priv, 0);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
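+/*
+ * mac80211 start callback: enable the RF, kick off the interrupt URB,
+ * allocate the TX URB pool, submit the RX URBs, and open the RX filters
+ * for management frames only.
+ */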
+static int rtl8xxxu_start(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct rtl8xxxu_rx_urb *rx_urb;
+ struct rtl8xxxu_tx_urb *tx_urb;
+ unsigned long flags;
+ int ret, i;
+
+ ret = 0;
+
+ init_usb_anchor(&priv->rx_anchor);
+ init_usb_anchor(&priv->tx_anchor);
+ init_usb_anchor(&priv->int_anchor);
+
+ rtl8723a_enable_rf(priv);
+ ret = rtl8xxxu_submit_int_urb(hw);
+ if (ret)
+ goto exit;
+
+ for (i = 0; i < RTL8XXXU_TX_URBS; i++) {
+ tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL);
+ if (!tx_urb) {
+ if (!i)
+ ret = -ENOMEM;
+
+ goto error_out;
+ }
+ usb_init_urb(&tx_urb->urb);
+ INIT_LIST_HEAD(&tx_urb->list);
+ tx_urb->hw = hw;
+ list_add(&tx_urb->list, &priv->tx_urb_free_list);
+ priv->tx_urb_free_count++;
+ }
+
+ priv->tx_stopped = false;
+
+ spin_lock_irqsave(&priv->rx_urb_lock, flags);
+ priv->shutdown = false;
+ spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+ for (i = 0; i < RTL8XXXU_RX_URBS; i++) {
+ rx_urb = kmalloc(sizeof(struct rtl8xxxu_rx_urb), GFP_KERNEL);
+ if (!rx_urb) {
+ if (!i)
+ ret = -ENOMEM;
+
+ goto error_out;
+ }
+ usb_init_urb(&rx_urb->urb);
+ INIT_LIST_HEAD(&rx_urb->list);
+ rx_urb->hw = hw;
+
+ ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+ }
+exit:
+ /*
+ * Disable all data frames
+ */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+ /*
+ * Accept all mgmt frames
+ */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
+
+ return ret;
+
+error_out:
+ rtl8xxxu_free_tx_resources(priv);
+ /*
+ * Disable all data and mgmt frames
+ */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+ rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+
+ return ret;
+}
+
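+/*
+ * mac80211 stop callback: pause TX, close the RX filters, kill all
+ * outstanding URBs, power down the RF and release the URB pools.
+ */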
+static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ unsigned long flags;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+
+ spin_lock_irqsave(&priv->rx_urb_lock, flags);
+ priv->shutdown = true;
+ spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+ usb_kill_anchored_urbs(&priv->rx_anchor);
+ usb_kill_anchored_urbs(&priv->tx_anchor);
+ usb_kill_anchored_urbs(&priv->int_anchor);
+
+ rtl8723a_disable_rf(priv);
+
+ /*
+ * Disable interrupts
+ */
+ rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+
+ rtl8xxxu_free_rx_resources(priv);
+ rtl8xxxu_free_tx_resources(priv);
+}
+
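+/* mac80211 callbacks implemented by this driver */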
+static const struct ieee80211_ops rtl8xxxu_ops = {
+ .tx = rtl8xxxu_tx,
+ .add_interface = rtl8xxxu_add_interface,
+ .remove_interface = rtl8xxxu_remove_interface,
+ .config = rtl8xxxu_config,
+ .conf_tx = rtl8xxxu_conf_tx,
+ .bss_info_changed = rtl8xxxu_bss_info_changed,
+ .configure_filter = rtl8xxxu_configure_filter,
+ .set_rts_threshold = rtl8xxxu_set_rts_threshold,
+ .start = rtl8xxxu_start,
+ .stop = rtl8xxxu_stop,
+ .sw_scan_start = rtl8xxxu_sw_scan_start,
+ .sw_scan_complete = rtl8xxxu_sw_scan_complete,
+ .set_key = rtl8xxxu_set_key,
+ .ampdu_action = rtl8xxxu_ampdu_action,
+};
+
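+/*
+ * Walk the interface's endpoint descriptors to locate the bulk IN pipe,
+ * the interrupt IN pipe and up to RTL8XXXU_OUT_ENDPOINTS bulk OUT pipes.
+ */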
+static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
+ struct usb_interface *interface)
+{
+ struct usb_interface_descriptor *interface_desc;
+ struct usb_host_interface *host_interface;
+ struct usb_endpoint_descriptor *endpoint;
+ struct device *dev = &priv->udev->dev;
+ int i, j = 0, endpoints;
+ u8 dir, xtype, num;
+ int ret = 0;
+
+ host_interface = &interface->altsetting[0];
+ interface_desc = &host_interface->desc;
+ endpoints = interface_desc->bNumEndpoints;
+
+ for (i = 0; i < endpoints; i++) {
+ endpoint = &host_interface->endpoint[i].desc;
+
+ dir = endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ num = usb_endpoint_num(endpoint);
+ xtype = usb_endpoint_type(endpoint);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+ dev_dbg(dev,
+ "%s: endpoint: dir %02x, # %02x, type %02x\n",
+ __func__, dir, num, xtype);
+ if (usb_endpoint_dir_in(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+ dev_dbg(dev, "%s: in endpoint num %i\n",
+ __func__, num);
+
+ if (priv->pipe_in) {
+ dev_warn(dev,
+ "%s: Too many IN pipes\n", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ priv->pipe_in = usb_rcvbulkpipe(priv->udev, num);
+ }
+
+ if (usb_endpoint_dir_in(endpoint) &&
+ usb_endpoint_xfer_int(endpoint)) {
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+ dev_dbg(dev, "%s: interrupt endpoint num %i\n",
+ __func__, num);
+
+ if (priv->pipe_interrupt) {
+ dev_warn(dev, "%s: Too many INTERRUPT pipes\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ priv->pipe_interrupt = usb_rcvintpipe(priv->udev, num);
+ }
+
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+ dev_dbg(dev, "%s: out endpoint num %i\n",
+ __func__, num);
+ if (j >= RTL8XXXU_OUT_ENDPOINTS) {
+ dev_warn(dev,
+ "%s: Too many OUT pipes\n", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+ priv->out_ep[j++] = num;
+ }
+ }
+exit:
+ priv->nr_out_eps = j;
+ return ret;
+}
+
+static int rtl8xxxu_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct rtl8xxxu_priv *priv;
+ struct ieee80211_hw *hw;
+ struct usb_device *udev;
+ struct ieee80211_supported_band *sband;
+ int ret = 0;
+ int untested = 1;
+
+ udev = usb_get_dev(interface_to_usbdev(interface));
+
+ switch (id->idVendor) {
+ case USB_VENDOR_ID_REALTEK:
+		switch (id->idProduct) {
+ case 0x1724:
+ case 0x8176:
+ case 0x8178:
+ case 0x817f:
+ untested = 0;
+ break;
+ }
+ break;
+ case 0x7392:
+ if (id->idProduct == 0x7811)
+ untested = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (untested) {
+ rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+ dev_info(&udev->dev,
+ "This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n",
+ id->idVendor, id->idProduct);
+ dev_info(&udev->dev,
+ "Please report results to Jes.Sorensen@gmail.com\n");
+ }
+
+ hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
+ if (!hw) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->udev = udev;
+ priv->fops = (struct rtl8xxxu_fileops *)id->driver_info;
+ mutex_init(&priv->usb_buf_mutex);
+ mutex_init(&priv->h2c_mutex);
+ INIT_LIST_HEAD(&priv->tx_urb_free_list);
+ spin_lock_init(&priv->tx_urb_lock);
+ INIT_LIST_HEAD(&priv->rx_urb_pending_list);
+ spin_lock_init(&priv->rx_urb_lock);
+ INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+
+ usb_set_intfdata(interface, hw);
+
+ ret = rtl8xxxu_parse_usb(priv, interface);
+ if (ret)
+ goto exit;
+
+ ret = rtl8xxxu_identify_chip(priv);
+ if (ret) {
+ dev_err(&udev->dev, "Fatal - failed to identify chip\n");
+ goto exit;
+ }
+
+ ret = rtl8xxxu_read_efuse(priv);
+ if (ret) {
+ dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
+ goto exit;
+ }
+
+ ret = priv->fops->parse_efuse(priv);
+ if (ret) {
+ dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
+ goto exit;
+ }
+
+ rtl8xxxu_print_chipinfo(priv);
+
+ ret = priv->fops->load_firmware(priv);
+ if (ret) {
+ dev_err(&udev->dev, "Fatal - failed to load firmware\n");
+ goto exit;
+ }
+
+	ret = rtl8xxxu_init_device(hw);
+	if (ret)
+		goto exit;
+
+ hw->wiphy->max_scan_ssids = 1;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->queues = 4;
+
+ sband = &rtl8xxxu_supported_band;
+ sband->ht_cap.ht_supported = true;
+ sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ sband->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ sband->ht_cap.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40;
+ memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs));
+ sband->ht_cap.mcs.rx_mask[0] = 0xff;
+ sband->ht_cap.mcs.rx_mask[4] = 0x01;
+ if (priv->rf_paths > 1) {
+ sband->ht_cap.mcs.rx_mask[1] = 0xff;
+ sband->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ }
+ sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ /*
+ * Some APs will negotiate HT20_40 in a noisy environment leading
+ * to miserable performance. Rather than defaulting to this, only
+ * enable it if explicitly requested at module load time.
+ */
+ if (rtl8xxxu_ht40_2g) {
+ dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
+ sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+ hw->wiphy->rts_threshold = 2347;
+
+ SET_IEEE80211_DEV(priv->hw, &interface->dev);
+ SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr);
+
+ hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ /*
+ * The firmware handles rate control
+ */
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ dev_err(&udev->dev, "%s: Failed to register: %i\n",
+ __func__, ret);
+ goto exit;
+ }
+
+exit:
+ if (ret < 0)
+ usb_put_dev(udev);
+ return ret;
+}
+
+static void rtl8xxxu_disconnect(struct usb_interface *interface)
+{
+ struct rtl8xxxu_priv *priv;
+ struct ieee80211_hw *hw;
+
+ hw = usb_get_intfdata(interface);
+ priv = hw->priv;
+
+ rtl8xxxu_disable_device(hw);
+ usb_set_intfdata(interface, NULL);
+
+ dev_info(&priv->udev->dev, "disconnecting\n");
+
+ ieee80211_unregister_hw(hw);
+
+ kfree(priv->fw_data);
+ mutex_destroy(&priv->usb_buf_mutex);
+ mutex_destroy(&priv->h2c_mutex);
+
+ usb_put_dev(priv->udev);
+ ieee80211_free_hw(hw);
+}
+
+static struct rtl8xxxu_fileops rtl8723au_fops = {
+ .parse_efuse = rtl8723au_parse_efuse,
+ .load_firmware = rtl8723au_load_firmware,
+ .power_on = rtl8723au_power_on,
+ .writeN_block_size = 1024,
+};
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
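+/* fileops for device variants that have not been verified with this driver */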
+static struct rtl8xxxu_fileops rtl8192cu_fops = {
+ .parse_efuse = rtl8192cu_parse_efuse,
+ .load_firmware = rtl8192cu_load_firmware,
+ .power_on = rtl8192cu_power_on,
+ .writeN_block_size = 128,
+};
+
+#endif
+
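+/*
+ * Match on VID/PID plus the vendor specific (0xff) interface
+ * class/subclass/protocol exposed by these dongles.
+ */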
+static struct usb_device_id dev_table[] = {
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723au_fops},
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+/* Still supported by rtlwifi */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8178, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Larry Finger */
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8188 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8177, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817d, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */
+{USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0631, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x094c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x5088, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0052, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x005c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0eb0, 0x9071, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x103c, 0x1629, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2e, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffa, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff8, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffb, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffc, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x1201, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8192 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0061, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8178, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9021, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0xf001, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x2e2e, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0019, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0020, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3307, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3309, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0100, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0091, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+#endif
+{ }
+};
+
+static struct usb_driver rtl8xxxu_driver = {
+ .name = DRIVER_NAME,
+ .probe = rtl8xxxu_probe,
+ .disconnect = rtl8xxxu_disconnect,
+ .id_table = dev_table,
+ .disable_hub_initiated_lpm = 1,
+};
+
+static int __init rtl8xxxu_module_init(void)
+{
+ int res;
+
+ res = usb_register(&rtl8xxxu_driver);
+ if (res < 0)
+ pr_err(DRIVER_NAME ": usb_register() failed (%i)\n", res);
+
+ return res;
+}
+
+static void __exit rtl8xxxu_module_exit(void)
+{
+ usb_deregister(&rtl8xxxu_driver);
+}
+
+MODULE_DEVICE_TABLE(usb, dev_table);
+
+module_init(rtl8xxxu_module_init);
+module_exit(rtl8xxxu_module_exit);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
new file mode 100644
index 0000000..f2a1bac
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+#include <asm/byteorder.h>
+
+#define RTL8XXXU_DEBUG_REG_WRITE 0x01
+#define RTL8XXXU_DEBUG_REG_READ 0x02
+#define RTL8XXXU_DEBUG_RFREG_WRITE 0x04
+#define RTL8XXXU_DEBUG_RFREG_READ 0x08
+#define RTL8XXXU_DEBUG_CHANNEL 0x10
+#define RTL8XXXU_DEBUG_TX 0x20
+#define RTL8XXXU_DEBUG_TX_DUMP 0x40
+#define RTL8XXXU_DEBUG_RX 0x80
+#define RTL8XXXU_DEBUG_RX_DUMP 0x100
+#define RTL8XXXU_DEBUG_USB 0x200
+#define RTL8XXXU_DEBUG_KEY 0x400
+#define RTL8XXXU_DEBUG_H2C 0x800
+#define RTL8XXXU_DEBUG_ACTION 0x1000
+#define RTL8XXXU_DEBUG_EFUSE 0x2000
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT 500
+#define RTL8XXXU_MAX_REG_POLL 500
+#define USB_INTR_CONTENT_LENGTH 56
+
+#define RTL8XXXU_OUT_ENDPOINTS 3
+
+#define REALTEK_USB_READ 0xc0
+#define REALTEK_USB_WRITE 0x40
+#define REALTEK_USB_CMD_REQ 0x05
+#define REALTEK_USB_CMD_IDX 0x00
+
+#define TX_TOTAL_PAGE_NUM 0xf8
+/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
+#define TX_PAGE_NUM_PUBQ 0xe7
+#define TX_PAGE_NUM_HI_PQ 0x0c
+#define TX_PAGE_NUM_LO_PQ 0x02
+#define TX_PAGE_NUM_NORM_PQ 0x02
+
+#define RTL_FW_PAGE_SIZE 4096
+#define RTL8XXXU_FIRMWARE_POLL_MAX 1000
+
+#define RTL8723A_CHANNEL_GROUPS 3
+#define RTL8723A_MAX_RF_PATHS 2
+#define RF6052_MAX_TX_PWR 0x3f
+
+#define EFUSE_MAP_LEN_8723A 256
+#define EFUSE_MAX_SECTION_8723A 32
+#define EFUSE_REAL_CONTENT_LEN_8723A 512
+#define EFUSE_BT_MAP_LEN_8723A 1024
+#define EFUSE_MAX_WORD_UNIT 4
+
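+/*
+ * Hardware RX descriptor. The bitfield layout depends on host byte
+ * order, hence the little and big endian variants below.
+ */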
+struct rtl8xxxu_rx_desc {
+#ifdef __LITTLE_ENDIAN
+ u32 pktlen:14;
+ u32 crc32:1;
+ u32 icverr:1;
+ u32 drvinfo_sz:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phy_stats:1;
+ u32 swdec:1;
+ u32 ls:1;
+ u32 fs:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 tid:4;
+ u32 hwrsvd:4;
+ u32 amsdu:1;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1fit:4;
+ u32 a2fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 md:1;
+ u32 mf:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 reserved0:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 gf:1;
+ u32 splcp:1;
+ u32 bw:1;
+ u32 htc:1;
+ u32 eosp:1;
+ u32 bssidfit:2;
+ u32 reserved1:16;
+ u32 unicastwake:1;
+ u32 magicwake:1;
+
+ u32 pattern0match:1;
+ u32 pattern1match:1;
+ u32 pattern2match:1;
+ u32 pattern3match:1;
+ u32 pattern4match:1;
+ u32 pattern5match:1;
+ u32 pattern6match:1;
+ u32 pattern7match:1;
+ u32 pattern8match:1;
+ u32 pattern9match:1;
+ u32 patternamatch:1;
+ u32 patternbmatch:1;
+ u32 patterncmatch:1;
+ u32 reserved2:19;
+#else
+ u32 own:1;
+ u32 eor:1;
+ u32 fs:1;
+ u32 ls:1;
+ u32 swdec:1;
+ u32 phy_stats:1;
+ u32 shift:2;
+ u32 qos:1;
+ u32 security:3;
+ u32 drvinfo_sz:4;
+ u32 icverr:1;
+ u32 crc32:1;
+ u32 pktlen:14;
+
+ u32 bc:1;
+ u32 mc:1;
+ u32 type:2;
+ u32 mf:1;
+ u32 md:1;
+ u32 pwr:1;
+ u32 pam:1;
+ u32 a2fit:4;
+ u32 a1fit:4;
+ u32 faggr:1;
+ u32 paggr:1;
+ u32 amsdu:1;
+ u32 hwrsvd:4;
+ u32 tid:4;
+ u32 macid:5;
+
+ u32 reserved0:1;
+ u32 nextind:1;
+ u32 nextpktlen:14;
+ u32 frag:4;
+ u32 seq:12;
+
+ u32 magicwake:1;
+ u32 unicastwake:1;
+ u32 reserved1:16;
+ u32 bssidfit:2;
+ u32 eosp:1;
+ u32 htc:1;
+ u32 bw:1;
+ u32 splcp:1;
+ u32 gf:1;
+ u32 rxht:1;
+ u32 rxmcs:6;
+
+ u32 reserved2:19;
+ u32 patterncmatch:1;
+ u32 patternbmatch:1;
+ u32 patternamatch:1;
+ u32 pattern9match:1;
+ u32 pattern8match:1;
+ u32 pattern7match:1;
+ u32 pattern6match:1;
+ u32 pattern5match:1;
+ u32 pattern4match:1;
+ u32 pattern3match:1;
+ u32 pattern2match:1;
+ u32 pattern1match:1;
+ u32 pattern0match:1;
+#endif
+ __le32 tsfl;
+#if 0
+ u32 bassn:12;
+ u32 bavld:1;
+ u32 reserved3:19;
+#endif
+};
+
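+/*
+ * Hardware TX descriptor, prepended to each frame handed to the device.
+ * All multi-byte fields are little endian.
+ */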
+struct rtl8xxxu_tx_desc {
+ __le16 pkt_size;
+ u8 pkt_offset;
+ u8 txdw0;
+ __le32 txdw1;
+ __le32 txdw2;
+ __le32 txdw3;
+ __le32 txdw4;
+ __le32 txdw5;
+ __le32 txdw6;
+ __le16 csum;
+ __le16 txdw7;
+};
+
+/* CCK Rates, TxHT = 0 */
+#define DESC_RATE_1M 0x00
+#define DESC_RATE_2M 0x01
+#define DESC_RATE_5_5M 0x02
+#define DESC_RATE_11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC_RATE_6M 0x04
+#define DESC_RATE_9M 0x05
+#define DESC_RATE_12M 0x06
+#define DESC_RATE_18M 0x07
+#define DESC_RATE_24M 0x08
+#define DESC_RATE_36M 0x09
+#define DESC_RATE_48M 0x0a
+#define DESC_RATE_54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC_RATE_MCS0 0x0c
+#define DESC_RATE_MCS1 0x0d
+#define DESC_RATE_MCS2 0x0e
+#define DESC_RATE_MCS3 0x0f
+#define DESC_RATE_MCS4 0x10
+#define DESC_RATE_MCS5 0x11
+#define DESC_RATE_MCS6 0x12
+#define DESC_RATE_MCS7 0x13
+#define DESC_RATE_MCS8 0x14
+#define DESC_RATE_MCS9 0x15
+#define DESC_RATE_MCS10 0x16
+#define DESC_RATE_MCS11 0x17
+#define DESC_RATE_MCS12 0x18
+#define DESC_RATE_MCS13 0x19
+#define DESC_RATE_MCS14 0x1a
+#define DESC_RATE_MCS15 0x1b
+#define DESC_RATE_MCS15_SG 0x1c
+#define DESC_RATE_MCS32 0x20
+
+#define TXDESC_OFFSET_SZ 0
+#define TXDESC_OFFSET_SHT 16
+#if 0
+#define TXDESC_BMC BIT(24)
+#define TXDESC_LSG BIT(26)
+#define TXDESC_FSG BIT(27)
+#define TXDESC_OWN BIT(31)
+#else
+#define TXDESC_BROADMULTICAST BIT(0)
+#define TXDESC_LAST_SEGMENT BIT(2)
+#define TXDESC_FIRST_SEGMENT BIT(3)
+#define TXDESC_OWN BIT(7)
+#endif
+
+/* Word 1 */
+#define TXDESC_PKT_OFFSET_SZ 0
+#define TXDESC_AGG_ENABLE BIT(5)
+#define TXDESC_BK BIT(6)
+#define TXDESC_QUEUE_SHIFT 8
+#define TXDESC_QUEUE_MASK 0x1f00
+#define TXDESC_QUEUE_BK 0x2
+#define TXDESC_QUEUE_BE 0x0
+#define TXDESC_QUEUE_VI 0x5
+#define TXDESC_QUEUE_VO 0x7
+#define TXDESC_QUEUE_BEACON 0x10
+#define TXDESC_QUEUE_HIGH 0x11
+#define TXDESC_QUEUE_MGNT 0x12
+#define TXDESC_QUEUE_CMD 0x13
+#define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1)
+
+#define DESC_RATE_ID_SHIFT 16
+#define DESC_RATE_ID_MASK 0xf
+#define TXDESC_NAVUSEHDR BIT(20)
+#define TXDESC_SEC_RC4 0x00400000
+#define TXDESC_SEC_AES 0x00c00000
+#define TXDESC_PKT_OFFSET_SHIFT 26
+#define TXDESC_AGG_EN BIT(29)
+#define TXDESC_HWPC BIT(31)
+
+/* Word 2 */
+#define TXDESC_ACK_REPORT BIT(19)
+#define TXDESC_AMPDU_DENSITY_SHIFT 20
+
+/* Word 3 */
+#define TXDESC_SEQ_SHIFT 16
+#define TXDESC_SEQ_MASK 0x0fff0000
+
+/* Word 4 */
+#define TXDESC_QOS BIT(6)
+#define TXDESC_HW_SEQ_ENABLE BIT(7)
+#define TXDESC_USE_DRIVER_RATE BIT(8)
+#define TXDESC_DISABLE_DATA_FB BIT(10)
+#define TXDESC_CTS_SELF_ENABLE BIT(11)
+#define TXDESC_RTS_CTS_ENABLE BIT(12)
+#define TXDESC_HW_RTS_ENABLE BIT(13)
+#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
+#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
+#define TXDESC_SHORT_PREAMBLE BIT(24)
+#define TXDESC_DATA_BW BIT(25)
+#define TXDESC_RTS_DATA_BW BIT(27)
+#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28)
+#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29)
+
+/* Word 5 */
+#define TXDESC_RTS_RATE_SHIFT 0
+#define TXDESC_RTS_RATE_MASK 0x3f
+#define TXDESC_SHORT_GI BIT(6)
+#define TXDESC_CCX_TAG BIT(7)
+#define TXDESC_RETRY_LIMIT_ENABLE BIT(17)
+#define TXDESC_RETRY_LIMIT_SHIFT 18
+#define TXDESC_RETRY_LIMIT_MASK 0x00fc0000
+
+/* Word 6 */
+#define TXDESC_MAX_AGG_SHIFT 11
+
+struct phy_rx_agc_info {
+#ifdef __LITTLE_ENDIAN
+ u8 gain:7, trsw:1;
+#else
+ u8 trsw:1, gain:7;
+#endif
+};
+
+struct rtl8723au_phy_stats {
+ struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS];
+ u8 ch_corr[RTL8723A_MAX_RF_PATHS];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 reserved_1;
+ u8 noise_power_db_msb;
+ u8 path_cfotail[RTL8723A_MAX_RF_PATHS];
+ u8 pcts_mask[RTL8723A_MAX_RF_PATHS];
+ s8 stream_rxevm[RTL8723A_MAX_RF_PATHS];
+ u8 path_rxsnr[RTL8723A_MAX_RF_PATHS];
+ u8 noise_power_db_lsb;
+ u8 reserved_2[3];
+ u8 stream_csi[RTL8723A_MAX_RF_PATHS];
+ u8 stream_target_csi[RTL8723A_MAX_RF_PATHS];
+ s8 sig_evm;
+ u8 reserved_3;
+
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 antenna_select_b:1;
+ u8 antenna_select:1;
+#else /* _BIG_ENDIAN_ */
+ u8 antenna_select:1;
+ u8 antenna_select_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+#endif
+};
+
+/*
+ * Sizes of the register save/restore arrays
+ */
+#define RTL8XXXU_ADDA_REGS 16
+#define RTL8XXXU_MAC_REGS 4
+#define RTL8XXXU_BB_REGS 9
+
+struct rtl8xxxu_firmware_header {
+	__le16 signature;	/* 92C0/88C0: test chip; 88C1/92C1: MP A-cut */
+ u8 category; /* AP/NIC and USB/PCI */
+ u8 function;
+
+ __le16 major_version; /* FW Version */
+ u8 minor_version; /* FW Subversion, default 0x00 */
+ u8 reserved1;
+
+ u8 month; /* Release time Month field */
+ u8 date; /* Release time Date field */
+ u8 hour; /* Release time Hour field */
+ u8 minute; /* Release time Minute field */
+
+ __le16 ramcodesize; /* Size of RAM code */
+ u16 reserved2;
+
+ __le32 svn_idx; /* SVN entry index */
+ u32 reserved3;
+
+ u32 reserved4;
+ u32 reserved5;
+
+ u8 data[0];
+};
+
+/*
+ * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
+ */
+struct rtl8723au_idx {
+#ifdef __LITTLE_ENDIAN
+ int a:4;
+ int b:4;
+#else
+ int b:4;
+ int a:4;
+#endif
+} __attribute__((packed));
+
+struct rtl8723au_efuse {
+ __le16 rtl_id;
+ u8 res0[0xe];
+ u8 cck_tx_power_index_A[3]; /* 0x10 */
+ u8 cck_tx_power_index_B[3];
+ u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */
+ u8 ht40_1s_tx_power_index_B[3];
+ /*
+ * The following entries are half-bytes split as:
+ * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+ */
+ struct rtl8723au_idx ht20_tx_power_index_diff[3];
+ struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+ struct rtl8723au_idx ht40_max_power_offset[3];
+ struct rtl8723au_idx ht20_max_power_offset[3];
+ u8 channel_plan; /* 0x28 */
+ u8 tssi_a;
+ u8 thermal_meter;
+ u8 rf_regulatory;
+ u8 rf_option_2;
+ u8 rf_option_3;
+ u8 rf_option_4;
+ u8 res7;
+	u8 version;		/* 0x30 */
+ u8 customer_id_major;
+ u8 customer_id_minor;
+ u8 xtal_k;
+ u8 chipset; /* 0x34 */
+ u8 res8[0x82];
+ u8 vid; /* 0xb7 */
+ u8 res9;
+ u8 pid; /* 0xb9 */
+ u8 res10[0x0c];
+ u8 mac_addr[ETH_ALEN]; /* 0xc6 */
+ u8 res11[2];
+ u8 vendor_name[7];
+ u8 res12[2];
+ u8 device_name[0x29]; /* 0xd7 */
+};
+
+struct rtl8192cu_efuse {
+ __le16 rtl_id;
+ __le16 hpon;
+ u8 res0[2];
+ __le16 clk;
+ __le16 testr;
+ __le16 vid;
+ __le16 did;
+ __le16 svid;
+ __le16 smid; /* 0x10 */
+ u8 res1[4];
+ u8 mac_addr[ETH_ALEN]; /* 0x16 */
+ u8 res2[2];
+ u8 vendor_name[7];
+ u8 res3[3];
+ u8 device_name[0x14]; /* 0x28 */
+ u8 res4[0x1e]; /* 0x3c */
+ u8 cck_tx_power_index_A[3]; /* 0x5a */
+ u8 cck_tx_power_index_B[3];
+ u8 ht40_1s_tx_power_index_A[3]; /* 0x60 */
+ u8 ht40_1s_tx_power_index_B[3];
+ /*
+ * The following entries are half-bytes split as:
+ * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+ */
+ struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+ struct rtl8723au_idx ht20_tx_power_index_diff[3]; /* 0x69 */
+ struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+ struct rtl8723au_idx ht40_max_power_offset[3]; /* 0x6f */
+ struct rtl8723au_idx ht20_max_power_offset[3];
+ u8 channel_plan; /* 0x75 */
+ u8 tssi_a;
+ u8 tssi_b;
+ u8 thermal_meter; /* xtal_k */ /* 0x78 */
+ u8 rf_regulatory;
+ u8 rf_option_2;
+ u8 rf_option_3;
+ u8 rf_option_4;
+ u8 res5[1]; /* 0x7d */
+ u8 version;
+ u8 customer_id;
+};
+
+struct rtl8xxxu_reg8val {
+ u16 reg;
+ u8 val;
+};
+
+struct rtl8xxxu_reg32val {
+ u16 reg;
+ u32 val;
+};
+
+struct rtl8xxxu_rfregval {
+ u8 reg;
+ u32 val;
+};
+
+enum rtl8xxxu_rfpath {
+ RF_A = 0,
+ RF_B = 1,
+};
+
+struct rtl8xxxu_rfregs {
+ u16 hssiparm1;
+ u16 hssiparm2;
+ u16 lssiparm;
+ u16 hspiread;
+ u16 lssiread;
+ u16 rf_sw_ctrl;
+};
+
+#define H2C_MAX_MBOX 4
+#define H2C_EXT BIT(7)
+#define H2C_SET_POWER_MODE 1
+#define H2C_JOIN_BSS_REPORT 2
+#define H2C_JOIN_BSS_DISCONNECT 0
+#define H2C_JOIN_BSS_CONNECT 1
+#define H2C_SET_RSSI 5
+#define H2C_SET_RATE_MASK (6 | H2C_EXT)
+
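+/*
+ * Layouts of the host-to-chip (H2C) commands passed to the firmware via
+ * the HMBOX registers.
+ */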
+struct h2c_cmd {
+ union {
+ struct {
+ u8 cmd;
+ u8 data[5];
+ } __packed cmd;
+ struct {
+ __le32 data;
+ __le16 ext;
+ } __packed raw;
+ struct {
+ u8 cmd;
+ u8 data;
+ u8 pad[4];
+ } __packed joinbss;
+ struct {
+ u8 cmd;
+ __le16 mask_hi;
+ u8 arg;
+ __le16 mask_lo;
+ } __packed ramask;
+ };
+};
+
+struct rtl8xxxu_fileops;
+
+struct rtl8xxxu_priv {
+ struct ieee80211_hw *hw;
+ struct usb_device *udev;
+ struct rtl8xxxu_fileops *fops;
+
+ spinlock_t tx_urb_lock;
+ struct list_head tx_urb_free_list;
+ int tx_urb_free_count;
+ bool tx_stopped;
+
+ spinlock_t rx_urb_lock;
+ struct list_head rx_urb_pending_list;
+ int rx_urb_pending_count;
+ bool shutdown;
+ struct work_struct rx_urb_wq;
+
+ u8 mac_addr[ETH_ALEN];
+ char chip_name[8];
+ u8 cck_tx_power_index_A[3]; /* 0x10 */
+ u8 cck_tx_power_index_B[3];
+ u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */
+ u8 ht40_1s_tx_power_index_B[3];
+ /*
+ * The following entries are half-bytes split as:
+ * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+ */
+ struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+ struct rtl8723au_idx ht20_tx_power_index_diff[3];
+ struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+ struct rtl8723au_idx ht40_max_power_offset[3];
+ struct rtl8723au_idx ht20_max_power_offset[3];
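+	/* Flags filled in during chip identification and efuse parsing */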
+ u32 chip_cut:4;
+ u32 rom_rev:4;
+ u32 has_wifi:1;
+ u32 has_bluetooth:1;
+ u32 enable_bluetooth:1;
+ u32 has_gps:1;
+ u32 hi_pa:1;
+ u32 vendor_umc:1;
+ u32 has_polarity_ctrl:1;
+ u32 has_eeprom:1;
+ u32 boot_eeprom:1;
+ u32 ep_tx_high_queue:1;
+ u32 ep_tx_normal_queue:1;
+ u32 ep_tx_low_queue:1;
+ u32 path_a_hi_power:1;
+ u32 path_a_rf_paths:4;
+ unsigned int pipe_interrupt;
+ unsigned int pipe_in;
+ unsigned int pipe_out[TXDESC_QUEUE_MAX];
+ u8 out_ep[RTL8XXXU_OUT_ENDPOINTS];
+ u8 path_a_ig_value;
+ u8 ep_tx_count;
+ u8 rf_paths;
+ u8 rx_paths;
+ u8 tx_paths;
+ u32 rf_mode_ag[2];
+ u32 rege94;
+ u32 rege9c;
+ u32 regeb4;
+ u32 regebc;
+ int next_mbox;
+ int nr_out_eps;
+
+ struct mutex h2c_mutex;
+
+ struct usb_anchor rx_anchor;
+ struct usb_anchor tx_anchor;
+ struct usb_anchor int_anchor;
+ struct rtl8xxxu_firmware_header *fw_data;
+ size_t fw_size;
+ struct mutex usb_buf_mutex;
+ union {
+ __le32 val32;
+ __le16 val16;
+ u8 val8;
+ } usb_buf;
+ union {
+ u8 raw[EFUSE_MAP_LEN_8723A];
+ struct rtl8723au_efuse efuse8723;
+ struct rtl8192cu_efuse efuse8192;
+ } efuse_wifi;
+ u32 adda_backup[RTL8XXXU_ADDA_REGS];
+ u32 mac_backup[RTL8XXXU_MAC_REGS];
+ u32 bb_backup[RTL8XXXU_BB_REGS];
+ u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
+ u32 rtlchip;
+ u8 pi_enabled:1;
+ u8 iqk_initialized:1;
+ u8 int_buf[USB_INTR_CONTENT_LENGTH];
+};
+
+struct rtl8xxxu_rx_urb {
+ struct urb urb;
+ struct ieee80211_hw *hw;
+ struct list_head list;
+};
+
+struct rtl8xxxu_tx_urb {
+ struct urb urb;
+ struct ieee80211_hw *hw;
+ struct list_head list;
+};
+
+struct rtl8xxxu_fileops {
+	int (*parse_efuse)(struct rtl8xxxu_priv *priv);
+	int (*load_firmware)(struct rtl8xxxu_priv *priv);
+	int (*power_on)(struct rtl8xxxu_priv *priv);
+ int writeN_block_size;
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
new file mode 100644
index 0000000..23208f7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+/* 0x0000 ~ 0x00FF System Configuration */
+#define REG_SYS_ISO_CTRL 0x0000
+#define SYS_ISO_MD2PP BIT(0)
+#define SYS_ISO_ANALOG_IPS BIT(5)
+#define SYS_ISO_DIOR BIT(9)
+#define SYS_ISO_PWC_EV25V BIT(14)
+#define SYS_ISO_PWC_EV12V BIT(15)
+
+#define REG_SYS_FUNC 0x0002
+#define SYS_FUNC_BBRSTB BIT(0)
+#define SYS_FUNC_BB_GLB_RSTN BIT(1)
+#define SYS_FUNC_USBA BIT(2)
+#define SYS_FUNC_UPLL BIT(3)
+#define SYS_FUNC_USBD BIT(4)
+#define SYS_FUNC_DIO_PCIE BIT(5)
+#define SYS_FUNC_PCIEA BIT(6)
+#define SYS_FUNC_PPLL BIT(7)
+#define SYS_FUNC_PCIED BIT(8)
+#define SYS_FUNC_DIOE BIT(9)
+#define SYS_FUNC_CPU_ENABLE BIT(10)
+#define SYS_FUNC_DCORE BIT(11)
+#define SYS_FUNC_ELDR BIT(12)
+#define SYS_FUNC_DIO_RF BIT(13)
+#define SYS_FUNC_HWPDN BIT(14)
+#define SYS_FUNC_MREGEN BIT(15)
+
+#define REG_APS_FSMCO 0x0004
+#define APS_FSMCO_PFM_ALDN BIT(1)
+#define APS_FSMCO_PFM_WOWL BIT(3)
+#define APS_FSMCO_ENABLE_POWERDOWN BIT(4)
+#define APS_FSMCO_MAC_ENABLE BIT(8)
+#define APS_FSMCO_MAC_OFF BIT(9)
+#define APS_FSMCO_HW_SUSPEND BIT(11)
+#define APS_FSMCO_PCIE BIT(12)
+#define APS_FSMCO_HW_POWERDOWN BIT(15)
+#define APS_FSMCO_WLON_RESET BIT(16)
+
+#define REG_SYS_CLKR 0x0008
+#define SYS_CLK_ANAD16V_ENABLE BIT(0)
+#define SYS_CLK_ANA8M BIT(1)
+#define SYS_CLK_MACSLP BIT(4)
+#define SYS_CLK_LOADER_ENABLE BIT(5)
+#define SYS_CLK_80M_SSC_DISABLE BIT(7)
+#define SYS_CLK_80M_SSC_ENABLE_HO BIT(8)
+#define SYS_CLK_PHY_SSC_RSTB BIT(9)
+#define SYS_CLK_SEC_CLK_ENABLE BIT(10)
+#define SYS_CLK_MAC_CLK_ENABLE BIT(11)
+#define SYS_CLK_ENABLE BIT(12)
+#define SYS_CLK_RING_CLK_ENABLE BIT(13)
+
+#define REG_9346CR 0x000a
+#define EEPROM_BOOT BIT(4)
+#define EEPROM_ENABLE BIT(5)
+
+#define REG_EE_VPD 0x000c
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001c
+
+#define REG_RF_CTRL 0x001f
+#define RF_ENABLE BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+#define REG_LDOA15_CTRL 0x0020
+#define LDOA15_ENABLE BIT(0)
+#define LDOA15_STANDBY BIT(1)
+#define LDOA15_OBUF BIT(2)
+#define LDOA15_REG_VOS BIT(3)
+#define LDOA15_VOADJ_SHIFT 4
+
+#define REG_LDOV12D_CTRL 0x0021
+#define LDOV12D_ENABLE BIT(0)
+#define LDOV12D_STANDBY BIT(1)
+#define LDOV12D_VADJ_SHIFT 4
+
+#define REG_LDOHCI12_CTRL 0x0022
+
+#define REG_LPLDO_CTRL 0x0023
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+
+#define REG_AFE_XTAL_CTRL 0x0024
+#define AFE_XTAL_ENABLE BIT(0)
+#define AFE_XTAL_B_SELECT BIT(1)
+#define AFE_XTAL_GATE_USB BIT(8)
+#define AFE_XTAL_GATE_AFE BIT(11)
+#define AFE_XTAL_RF_GATE BIT(14)
+#define AFE_XTAL_GATE_DIG BIT(17)
+#define AFE_XTAL_BT_GATE BIT(20)
+
+#define REG_AFE_PLL_CTRL 0x0028
+#define AFE_PLL_ENABLE BIT(0)
+#define AFE_PLL_320_ENABLE BIT(1)
+#define APE_PLL_FREF_SELECT BIT(2)
+#define AFE_PLL_EDGE_SELECT BIT(3)
+#define AFE_PLL_WDOGB BIT(4)
+#define AFE_PLL_LPF_ENABLE BIT(5)
+
+#define REG_MAC_PHY_CTRL 0x002c
+
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define EFUSE_TRPT BIT(7)
+ /* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define EFUSE_CELL_SEL (BIT(8) | BIT(9))
+#define EFUSE_LDOE25_ENABLE BIT(31)
+#define EFUSE_SELECT_MASK 0x0300
+#define EFUSE_WIFI_SELECT 0x0000
+#define EFUSE_BT0_SELECT 0x0100
+#define EFUSE_BT1_SELECT 0x0200
+#define EFUSE_BT2_SELECT 0x0300
+
+#define EFUSE_ACCESS_ENABLE 0x69 /* RTL8723 only */
+#define EFUSE_ACCESS_DISABLE 0x00 /* RTL8723 only */
+
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003c
+#define REG_ACLK_MON 0x003e
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004c
+#define REG_LEDCFG1 0x004d
+#define REG_LEDCFG2 0x004e
+#define LEDCFG2_DPDT_SELECT BIT(7)
+#define REG_LEDCFG3 0x004f
+#define REG_LEDCFG REG_LEDCFG2
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2 0x0060
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2 0x0062
+
+/* RTL8723 only WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL 0x0068
+
+#define MULTI_FN_WIFI_HW_PWRDOWN_EN BIT(0) /* Enable GPIO[9] as WiFi HW
+ powerdown source */
+#define MULTI_FN_WIFI_HW_PWRDOWN_SL BIT(1) /* WiFi HW powerdown polarity
+ control */
+#define MULTI_WIFI_FUNC_EN BIT(2) /* WiFi function enable */
+
+#define MULTI_WIFI_HW_ROF_EN BIT(3) /* Enable GPIO[9] as WiFi RF HW
+ powerdown source */
+#define MULTI_BT_HW_PWRDOWN_EN BIT(16) /* Enable GPIO[11] as BT HW
+ powerdown source */
+#define MULTI_BT_HW_PWRDOWN_SL BIT(17) /* BT HW powerdown polarity
+ control */
+#define MULTI_BT_FUNC_EN BIT(18) /* BT function enable */
+#define MULTI_BT_HW_ROF_EN BIT(19) /* Enable GPIO[11] as BT/GPS
+ RF HW powerdown source */
+#define MULTI_GPS_HW_PWRDOWN_EN BIT(20) /* Enable GPIO[10] as GPS HW
+ powerdown source */
+#define MULTI_GPS_HW_PWRDOWN_SL BIT(21) /* GPS HW powerdown polarity
+ control */
+#define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */
+
+#define REG_MCU_FW_DL 0x0080
+#define MCU_FW_DL_ENABLE BIT(0)
+#define MCU_FW_DL_READY BIT(1)
+#define MCU_FW_DL_CSUM_REPORT BIT(2)
+#define MCU_MAC_INIT_READY BIT(3)
+#define MCU_BB_INIT_READY BIT(4)
+#define MCU_RF_INIT_READY BIT(5)
+#define MCU_WINT_INIT_READY BIT(6)
+#define MCU_FW_RAM_SEL BIT(7) /* 1: RAM, 0:ROM */
+#define MCU_CP_RESET BIT(23)
+
+#define REG_HMBOX_EXT_0 0x0088
+#define REG_HMBOX_EXT_1 0x008a
+#define REG_HMBOX_EXT_2 0x008c
+#define REG_HMBOX_EXT_3 0x008e
+/* Host suspend counter on FPGA platform */
+#define REG_HOST_SUSP_CNT 0x00bc
+/* Efuse access protection for RTL8723 */
+#define REG_EFUSE_ACCESS 0x00cf
+#define REG_BIST_SCAN 0x00d0
+#define REG_BIST_RPT 0x00d4
+#define REG_BIST_ROM_RPT 0x00d8
+#define REG_USB_SIE_INTF 0x00e0
+#define REG_PCIE_MIO_INTF 0x00e4
+#define REG_PCIE_MIO_INTD 0x00e8
+#define REG_HPON_FSM 0x00ec
+#define HPON_FSM_BONDING_MASK (BIT(22) | BIT(23))
+#define HPON_FSM_BONDING_1T2R BIT(22)
+#define REG_SYS_CFG 0x00f0
+#define SYS_CFG_XCLK_VLD BIT(0)
+#define SYS_CFG_ACLK_VLD BIT(1)
+#define SYS_CFG_UCLK_VLD BIT(2)
+#define SYS_CFG_PCLK_VLD BIT(3)
+#define SYS_CFG_PCIRSTB BIT(4)
+#define SYS_CFG_V15_VLD BIT(5)
+#define SYS_CFG_TRP_B15V_EN BIT(7)
+#define SYS_CFG_SIC_IDLE BIT(8)
+#define SYS_CFG_BD_MAC2 BIT(9)
+#define SYS_CFG_BD_MAC1 BIT(10)
+#define SYS_CFG_IC_MACPHY_MODE BIT(11)
+#define SYS_CFG_CHIP_VER (BIT(12) | BIT(13) | BIT(14) | BIT(15))
+#define SYS_CFG_BT_FUNC BIT(16)
+#define SYS_CFG_VENDOR_ID BIT(19)
+#define SYS_CFG_PAD_HWPD_IDN BIT(22)
+#define SYS_CFG_TRP_VAUX_EN BIT(23)
+#define SYS_CFG_TRP_BT_EN BIT(24)
+#define SYS_CFG_BD_PKG_SEL BIT(25)
+#define SYS_CFG_BD_HCI_SEL BIT(26)
+#define SYS_CFG_TYPE_ID BIT(27)
+#define SYS_CFG_RTL_ID BIT(23) /* TestChip ID,
+ 1:Test(RLE); 0:MP(RL) */
+#define SYS_CFG_SPS_SEL BIT(24) /* 1:LDO regulator mode;
+ 0:Switching regulator mode*/
+#define SYS_CFG_CHIP_VERSION_MASK 0xf000 /* Bit 12 - 15 */
+#define SYS_CFG_CHIP_VERSION_SHIFT 12
+
+#define REG_GPIO_OUTSTS 0x00f4 /* For RTL8723 only. */
+#define GPIO_EFS_HCI_SEL (BIT(0) | BIT(1))
+#define GPIO_PAD_HCI_SEL (BIT(2) | BIT(3))
+#define GPIO_HCI_SEL (BIT(4) | BIT(5))
+#define GPIO_PKG_SEL_HCI BIT(6)
+#define GPIO_FEN_GPS BIT(7)
+#define GPIO_FEN_BT BIT(8)
+#define GPIO_FEN_WL BIT(9)
+#define GPIO_FEN_PCI BIT(10)
+#define GPIO_FEN_USB BIT(11)
+#define GPIO_BTRF_HWPDN_N BIT(12)
+#define GPIO_WLRF_HWPDN_N BIT(13)
+#define GPIO_PDN_BT_N BIT(14)
+#define GPIO_PDN_GPS_N BIT(15)
+#define GPIO_BT_CTL_HWPDN BIT(16)
+#define GPIO_GPS_CTL_HWPDN BIT(17)
+#define GPIO_PPHY_SUSB BIT(20)
+#define GPIO_UPHY_SUSB BIT(21)
+#define GPIO_PCI_SUSEN BIT(22)
+#define GPIO_USB_SUSEN BIT(23)
+#define GPIO_RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* 0x0100 ~ 0x01FF MACTOP General Configuration */
+#define REG_CR 0x0100
+#define CR_HCI_TXDMA_ENABLE BIT(0)
+#define CR_HCI_RXDMA_ENABLE BIT(1)
+#define CR_TXDMA_ENABLE BIT(2)
+#define CR_RXDMA_ENABLE BIT(3)
+#define CR_PROTOCOL_ENABLE BIT(4)
+#define CR_SCHEDULE_ENABLE BIT(5)
+#define CR_MAC_TX_ENABLE BIT(6)
+#define CR_MAC_RX_ENABLE BIT(7)
+#define CR_SW_BEACON_ENABLE BIT(8)
+#define CR_SECURITY_ENABLE BIT(9)
+#define CR_CALTIMER_ENABLE BIT(10)
+
+/* Media Status Register */
+#define REG_MSR 0x0102
+#define MSR_LINKTYPE_MASK 0x3
+#define MSR_LINKTYPE_NONE 0x0
+#define MSR_LINKTYPE_ADHOC 0x1
+#define MSR_LINKTYPE_STATION 0x2
+#define MSR_LINKTYPE_AP 0x3
+
+#define REG_PBP 0x0104
+#define PBP_PAGE_SIZE_RX_SHIFT 0
+#define PBP_PAGE_SIZE_TX_SHIFT 4
+#define PBP_PAGE_SIZE_64 0x0
+#define PBP_PAGE_SIZE_128 0x1
+#define PBP_PAGE_SIZE_256 0x2
+#define PBP_PAGE_SIZE_512 0x3
+#define PBP_PAGE_SIZE_1024 0x4
+
+#define REG_TRXDMA_CTRL 0x010c
+#define TRXDMA_CTRL_VOQ_SHIFT 4
+#define TRXDMA_CTRL_VIQ_SHIFT 6
+#define TRXDMA_CTRL_BEQ_SHIFT 8
+#define TRXDMA_CTRL_BKQ_SHIFT 10
+#define TRXDMA_CTRL_MGQ_SHIFT 12
+#define TRXDMA_CTRL_HIQ_SHIFT 14
+#define TRXDMA_QUEUE_LOW 1
+#define TRXDMA_QUEUE_NORMAL 2
+#define TRXDMA_QUEUE_HIGH 3
+
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011c
+#define REG_HIMR 0x0120
+#define REG_HISR 0x0124
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012c
+#define REG_CPWM 0x012f
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015c
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017c
+#define REG_C2HEVT_MSG_NORMAL 0x01a0
+#define REG_C2HEVT_CLEAR 0x01af
+#define REG_C2HEVT_MSG_TEST 0x01b8
+#define REG_MCUTST_1 0x01c0
+#define REG_FMTHR 0x01c8
+#define REG_HMTFR 0x01cc
+#define REG_HMBOX_0 0x01d0
+#define REG_HMBOX_1 0x01d4
+#define REG_HMBOX_2 0x01d8
+#define REG_HMBOX_3 0x01dc
+
+#define REG_LLT_INIT 0x01e0
+#define LLT_OP_INACTIVE 0x0
+#define LLT_OP_WRITE (0x1 << 30)
+#define LLT_OP_READ (0x2 << 30)
+#define LLT_OP_MASK (0x3 << 30)
+
+#define REG_BB_ACCEESS_CTRL 0x01e8
+#define REG_BB_ACCESS_DATA 0x01ec
+
+/* 0x0200 ~ 0x027F TXDMA Configuration */
+#define REG_RQPN 0x0200
+#define RQPN_HI_PQ_SHIFT 0
+#define RQPN_LO_PQ_SHIFT 8
+#define RQPN_NORM_PQ_SHIFT 16
+#define RQPN_LOAD BIT(31)
+
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020c
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+/* 0x0280 ~ 0x02FF RXDMA Configuration */
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+#define REG_RF_BB_CMD_ADDR 0x02c0
+#define REG_RF_BB_CMD_DATA 0x02c4
+
+/* spec version 11 */
+/* 0x0400 ~ 0x047F Protocol Configuration */
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040c
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+
+#define REG_CPU_MGQ_INFORMATION 0x041c
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define FWHW_TXQ_CTRL_AMPDU_RETRY BIT(7)
+#define FWHW_TXQ_CTRL_XMIT_MGMT_ACK BIT(12)
+
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_LIFETIME_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+
+#define REG_SPEC_SIFS 0x0428
+#define SPEC_SIFS_CCK_MASK 0x00ff
+#define SPEC_SIFS_CCK_SHIFT 0
+#define SPEC_SIFS_OFDM_MASK 0xff00
+#define SPEC_SIFS_OFDM_SHIFT 8
+
+#define REG_RETRY_LIMIT 0x042a
+#define RETRY_LIMIT_LONG_SHIFT 0
+#define RETRY_LIMIT_LONG_MASK 0x003f
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_SHORT_MASK 0x3f00
+
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RESPONSE_RATE_SET 0x0440
+#define RESPONSE_RATE_BITMAP_ALL 0xfffff
+#define RESPONSE_RATE_RRSR_CCK_ONLY_1M 0xffff1
+#define RSR_1M BIT(0)
+#define RSR_2M BIT(1)
+#define RSR_5_5M BIT(2)
+#define RSR_11M BIT(3)
+#define RSR_6M BIT(4)
+#define RSR_9M BIT(5)
+#define RSR_12M BIT(6)
+#define RSR_18M BIT(7)
+#define RSR_24M BIT(8)
+#define RSR_36M BIT(9)
+#define RSR_48M BIT(10)
+#define RSR_54M BIT(11)
+#define RSR_MCS0 BIT(12)
+#define RSR_MCS1 BIT(13)
+#define RSR_MCS2 BIT(14)
+#define RSR_MCS3 BIT(15)
+#define RSR_MCS4 BIT(16)
+#define RSR_MCS5 BIT(17)
+#define RSR_MCS6 BIT(18)
+#define RSR_MCS7 BIT(19)
+#define RSR_RSC_LOWER_SUB_CHANNEL BIT(21) /* 0x200000 */
+#define RSR_RSC_UPPER_SUB_CHANNEL BIT(22) /* 0x400000 */
+#define RSR_RSC_BANDWIDTH_40M (RSR_RSC_UPPER_SUB_CHANNEL | \
+ RSR_RSC_LOWER_SUB_CHANNEL)
+#define RSR_ACK_SHORT_PREAMBLE BIT(23)
+
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044c
+#define REG_ARFR3 0x0450
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045c
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045d
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+
+#define REG_POWER_STATUS 0x04a4
+#define REG_POWER_STAGE1 0x04b4
+#define REG_POWER_STAGE2 0x04b8
+#define REG_PKT_VO_VI_LIFE_TIME 0x04c0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04c2
+#define REG_STBC_SETTING 0x04c4
+#define REG_PROT_MODE_CTRL 0x04c8
+#define REG_MAX_AGGR_NUM 0x04ca
+#define REG_RTS_MAX_AGGR_NUM 0x04cb
+#define REG_BAR_MODE_CTRL 0x04cc
+#define REG_RA_TRY_RATE_AGG_LMT 0x04cf
+#define REG_NQOS_SEQ 0x04dc
+#define REG_QOS_SEQ 0x04de
+#define REG_NEED_CPU_HANDLE 0x04e0
+#define REG_PKT_LOSE_RPT 0x04e1
+#define REG_PTCL_ERR_STATUS 0x04e2
+#define REG_DUMMY 0x04fc
+
+/* 0x0500 ~ 0x05FF EDCA Configuration */
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050c
+#define EDCA_PARAM_ECW_MIN_SHIFT 8
+#define EDCA_PARAM_ECW_MAX_SHIFT 12
+#define EDCA_PARAM_TXOP_SHIFT 16
+#define REG_BEACON_TCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CCK 0x0514
+#define REG_SIFS_OFDM 0x0516
+#define REG_TSFTR_SYN_OFFSET 0x0518
+#define REG_AGGR_BREAK_TIME 0x051a
+#define REG_SLOT 0x051b
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+
+#define REG_BEACON_CTRL 0x0550
+#define REG_BEACON_CTRL_1 0x0551
+#define BEACON_ATIM BIT(0)
+#define BEACON_CTRL_MBSSID BIT(1)
+#define BEACON_CTRL_TX_BEACON_RPT BIT(2)
+#define BEACON_FUNCTION_ENABLE BIT(3)
+#define BEACON_DISABLE_TSF_UPDATE BIT(4)
+
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define DUAL_TSF_RESET_TSF0 BIT(0)
+#define DUAL_TSF_RESET_TSF1 BIT(1)
+#define DUAL_TSF_RESET_P2P BIT(4)
+#define DUAL_TSF_TX_OK BIT(5)
+
+/* The same as REG_MBSSID_BCN_SPACE */
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+
+#define REG_DRIVER_EARLY_INT 0x0558
+#define DRIVER_EARLY_INT_TIME 5
+
+#define REG_BEACON_DMA_TIME 0x0559
+#define BEACON_DMA_ATIME_INT_TIME 2
+
+#define REG_ATIMWND 0x055a
+#define REG_BCN_MAX_ERR 0x055d
+#define REG_RXTSF_OFFSET_CCK 0x055e
+#define REG_RXTSF_OFFSET_OFDM 0x055f
+#define REG_TSFTR 0x0560
+#define REG_TSFTR1 0x0568
+#define REG_INIT_TSFTR 0x0564
+#define REG_ATIMWND_1 0x0570
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACM_HW_CTRL 0x05c0
+#define ACM_HW_CTRL_BK BIT(0)
+#define ACM_HW_CTRL_BE BIT(1)
+#define ACM_HW_CTRL_VI BIT(2)
+#define ACM_HW_CTRL_VO BIT(3)
+#define REG_ACM_RST_CTRL 0x05c1
+#define REG_ACMAVG 0x05c2
+#define REG_VO_ADMTIME 0x05c4
+#define REG_VI_ADMTIME 0x05c6
+#define REG_BE_ADMTIME 0x05c8
+#define REG_EDCA_RANDOM_GEN 0x05cc
+#define REG_SCH_TXCMD 0x05d0
+
+/* define REG_FW_TSF_SYNC_CNT 0x04a0 */
+#define REG_FW_RESET_TSF_CNT_1 0x05fc
+#define REG_FW_RESET_TSF_CNT_0 0x05fd
+#define REG_FW_BCN_DIS_CNT 0x05fe
+
+/* 0x0600 ~ 0x07FF WMAC Configuration */
+#define REG_APSD_CTRL 0x0600
+#define APSD_CTRL_OFF BIT(6)
+#define APSD_CTRL_OFF_STATUS BIT(7)
+#define REG_BW_OPMODE 0x0603
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+#define REG_TCR 0x0604
+
+/* Receive Configuration Register */
+#define REG_RCR 0x0608
+#define RCR_ACCEPT_AP BIT(0) /* Accept all unicast packet */
+#define RCR_ACCEPT_PHYS_MATCH BIT(1) /* Accept phys match packet */
+#define RCR_ACCEPT_MCAST BIT(2)
+#define RCR_ACCEPT_BCAST BIT(3)
+#define RCR_ACCEPT_ADDR3 BIT(4) /* Accept address 3 match
+ packet */
+#define RCR_ACCEPT_PM BIT(5) /* Accept power management
+ packet */
+#define RCR_CHECK_BSSID_MATCH BIT(6) /* Accept BSSID match packet */
+#define RCR_CHECK_BSSID_BEACON BIT(7) /* Accept BSSID match packet
+ (Rx beacon, probe rsp) */
+#define RCR_ACCEPT_CRC32 BIT(8) /* Accept CRC32 error packet */
+#define RCR_ACCEPT_ICV BIT(9) /* Accept ICV error packet */
+#define RCR_ACCEPT_DATA_FRAME BIT(11)
+#define RCR_ACCEPT_CTRL_FRAME BIT(12)
+#define RCR_ACCEPT_MGMT_FRAME BIT(13)
+#define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
+#define RCR_MFBEN BIT(22)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MULTI_BSSID_ENABLE BIT(24) /* Enable Multiple BssId */
+#define RCR_ACCEPT_BA_SSN BIT(27) /* Accept BA SSN */
+#define RCR_APPEND_PHYSTAT BIT(28)
+#define RCR_APPEND_ICV BIT(29)
+#define RCR_APPEND_MIC BIT(30)
+#define RCR_APPEND_FCS BIT(31) /* WMAC append FCS after */
+
+#define REG_RX_PKT_LIMIT 0x060c
+#define REG_RX_DLK_TIME 0x060d
+#define REG_RX_DRVINFO_SZ 0x060f
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063a
+
+/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+ /* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS 0x063c
+ /* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS 0x063e
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL 0x0650
+/* In units of 128us */
+#define REG_NAV_UPPER 0x0652
+#define NAV_UPPER_UNIT 128
+
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+/* Security */
+#define REG_CAM_CMD 0x0670
+#define CAM_CMD_POLLING BIT(31)
+#define CAM_CMD_WRITE BIT(16)
+#define CAM_CMD_KEY_SHIFT 3
+#define REG_CAM_WRITE 0x0674
+#define CAM_WRITE_VALID BIT(15)
+#define REG_CAM_READ 0x0678
+#define REG_CAM_DEBUG 0x067c
+#define REG_SECURITY_CFG 0x0680
+#define SEC_CFG_TX_USE_DEFKEY BIT(0)
+#define SEC_CFG_RX_USE_DEFKEY BIT(1)
+#define SEC_CFG_TX_SEC_ENABLE BIT(2)
+#define SEC_CFG_RX_SEC_ENABLE BIT(3)
+#define SEC_CFG_SKBYA2 BIT(4)
+#define SEC_CFG_NO_SKMC BIT(5)
+#define SEC_CFG_TXBC_USE_DEFKEY BIT(6)
+#define SEC_CFG_RXBC_USE_DEFKEY BIT(7)
+
+/* Power */
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_RWD 0x069c
+#define REG_RXFLTMAP0 0x06a0
+#define REG_RXFLTMAP1 0x06a2
+#define REG_RXFLTMAP2 0x06a4
+#define REG_BCN_PSR_RPT 0x06a8
+#define REG_CALB32K_CTRL 0x06ac
+#define REG_PKT_MON_CTRL 0x06b4
+#define REG_BT_COEX_TABLE 0x06c0
+#define REG_WMAC_RESP_TXINFO 0x06d8
+
+#define REG_MACID1 0x0700
+#define REG_BSSID1 0x0708
+
+#define REG_FPGA0_RF_MODE 0x0800
+#define FPGA_RF_MODE BIT(0)
+#define FPGA_RF_MODE_JAPAN BIT(1)
+#define FPGA_RF_MODE_CCK BIT(24)
+#define FPGA_RF_MODE_OFDM BIT(25)
+
+#define REG_FPGA0_TX_INFO 0x0804
+#define REG_FPGA0_PSD_FUNC 0x0808
+#define REG_FPGA0_TX_GAIN 0x080c
+#define REG_FPGA0_RF_TIMING1 0x0810
+#define REG_FPGA0_RF_TIMING2 0x0814
+#define REG_FPGA0_POWER_SAVE 0x0818
+#define FPGA0_PS_LOWER_CHANNEL BIT(26)
+#define FPGA0_PS_UPPER_CHANNEL BIT(27)
+
+#define REG_FPGA0_XA_HSSI_PARM1 0x0820 /* RF 3 wire register */
+#define FPGA0_HSSI_PARM1_PI BIT(8)
+#define REG_FPGA0_XA_HSSI_PARM2 0x0824
+#define REG_FPGA0_XB_HSSI_PARM1 0x0828
+#define REG_FPGA0_XB_HSSI_PARM2 0x082c
+#define FPGA0_HSSI_3WIRE_DATA_LEN 0x800
+#define FPGA0_HSSI_3WIRE_ADDR_LEN 0x400
+#define FPGA0_HSSI_PARM2_ADDR_SHIFT 23
+#define FPGA0_HSSI_PARM2_ADDR_MASK 0x7f800000 /* 0xff << 23 */
+#define FPGA0_HSSI_PARM2_CCK_HIGH_PWR BIT(9)
+#define FPGA0_HSSI_PARM2_EDGE_READ BIT(31)
+
+#define REG_TX_AGC_B_RATE18_06 0x0830
+#define REG_TX_AGC_B_RATE54_24 0x0834
+#define REG_TX_AGC_B_CCK1_55_MCS32 0x0838
+#define REG_TX_AGC_B_MCS03_MCS00 0x083c
+
+#define REG_FPGA0_XA_LSSI_PARM 0x0840
+#define REG_FPGA0_XB_LSSI_PARM 0x0844
+#define FPGA0_LSSI_PARM_ADDR_SHIFT 20
+#define FPGA0_LSSI_PARM_ADDR_MASK 0x0ff00000
+#define FPGA0_LSSI_PARM_DATA_MASK 0x000fffff
+
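Each LSSI parameter register carries one serial RF write: the RF register address sits in bits 27:20 and the 20-bit payload below it. A sketch of a path-A write built from the masks above (the accessor name is an assumption):

    /* Illustrative: write 'data' to RF register 'reg' on path A */
    u32 val = ((reg << FPGA0_LSSI_PARM_ADDR_SHIFT) &
               FPGA0_LSSI_PARM_ADDR_MASK) |
              (data & FPGA0_LSSI_PARM_DATA_MASK);

    rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, val);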
+#define REG_TX_AGC_B_MCS07_MCS04 0x0848
+#define REG_TX_AGC_B_MCS11_MCS08 0x084c
+
+#define REG_FPGA0_XCD_SWITCH_CTRL 0x085c
+
+#define REG_FPGA0_XA_RF_INT_OE 0x0860 /* RF Channel switch */
+#define REG_FPGA0_XB_RF_INT_OE 0x0864
+#define FPGA0_INT_OE_ANTENNA_AB_OPEN 0x000
+#define FPGA0_INT_OE_ANTENNA_A BIT(8)
+#define FPGA0_INT_OE_ANTENNA_B BIT(9)
+#define FPGA0_INT_OE_ANTENNA_MASK (FPGA0_INT_OE_ANTENNA_A | \
+ FPGA0_INT_OE_ANTENNA_B)
+
+#define REG_TX_AGC_B_MCS15_MCS12 0x0868
+#define REG_TX_AGC_B_CCK11_A_CCK2_11 0x086c
+
+#define REG_FPGA0_XAB_RF_SW_CTRL 0x0870
+#define REG_FPGA0_XA_RF_SW_CTRL 0x0870 /* 16 bit */
+#define REG_FPGA0_XB_RF_SW_CTRL 0x0872 /* 16 bit */
+#define REG_FPGA0_XCD_RF_SW_CTRL 0x0874
+#define REG_FPGA0_XC_RF_SW_CTRL 0x0874 /* 16 bit */
+#define REG_FPGA0_XD_RF_SW_CTRL 0x0876 /* 16 bit */
+#define FPGA0_RF_3WIRE_DATA BIT(0)
+#define FPGA0_RF_3WIRE_CLOCK BIT(1)
+#define FPGA0_RF_3WIRE_LOAD BIT(2)
+#define FPGA0_RF_3WIRE_RW BIT(3)
+#define FPGA0_RF_3WIRE_MASK 0xf
+#define FPGA0_RF_RFENV BIT(4)
+#define FPGA0_RF_TRSW BIT(5) /* Currently unused */
+#define FPGA0_RF_TRSWB BIT(6)
+#define FPGA0_RF_ANTSW BIT(8)
+#define FPGA0_RF_ANTSWB BIT(9)
+#define FPGA0_RF_PAPE BIT(10)
+#define FPGA0_RF_PAPE5G BIT(11)
+#define FPGA0_RF_BD_CTRL_SHIFT 16
+
+#define REG_FPGA0_XAB_RF_PARM 0x0878 /* Antenna select path in ODM */
+#define REG_FPGA0_XA_RF_PARM 0x0878 /* 16 bit */
+#define REG_FPGA0_XB_RF_PARM 0x087a /* 16 bit */
+#define REG_FPGA0_XCD_RF_PARM 0x087c
+#define REG_FPGA0_XC_RF_PARM 0x087c /* 16 bit */
+#define REG_FPGA0_XD_RF_PARM 0x087e /* 16 bit */
+#define FPGA0_RF_PARM_RFA_ENABLE BIT(1)
+#define FPGA0_RF_PARM_RFB_ENABLE BIT(17)
+#define FPGA0_RF_PARM_CLK_GATE BIT(31)
+
+#define REG_FPGA0_ANALOG1 0x0880
+#define REG_FPGA0_ANALOG2 0x0884
+#define FPGA0_ANALOG2_20MHZ BIT(10)
+#define REG_FPGA0_ANALOG3 0x0888
+#define REG_FPGA0_ANALOG4 0x088c
+
+#define REG_FPGA0_XA_LSSI_READBACK 0x08a0 /* Transceiver LSSI Readback */
+#define REG_FPGA0_XB_LSSI_READBACK 0x08a4
+#define REG_HSPI_XA_READBACK 0x08b8 /* Transceiver A HSPI read */
+#define REG_HSPI_XB_READBACK 0x08bc /* Transceiver B HSPI read */
+
+#define REG_FPGA1_RF_MODE 0x0900
+
+#define REG_FPGA1_TX_INFO 0x090c
+
+#define REG_CCK0_SYSTEM 0x0a00
+#define CCK0_SIDEBAND BIT(4)
+
+#define REG_CCK0_AFE_SETTING 0x0a04
+
+#define REG_CONFIG_ANT_A 0x0b68
+#define REG_CONFIG_ANT_B 0x0b6c
+
+#define REG_OFDM0_TRX_PATH_ENABLE 0x0c04
+#define OFDM_RF_PATH_RX_MASK 0x0f
+#define OFDM_RF_PATH_RX_A BIT(0)
+#define OFDM_RF_PATH_RX_B BIT(1)
+#define OFDM_RF_PATH_RX_C BIT(2)
+#define OFDM_RF_PATH_RX_D BIT(3)
+#define OFDM_RF_PATH_TX_MASK 0xf0
+#define OFDM_RF_PATH_TX_A BIT(4)
+#define OFDM_RF_PATH_TX_B BIT(5)
+#define OFDM_RF_PATH_TX_C BIT(6)
+#define OFDM_RF_PATH_TX_D BIT(7)
+
+#define REG_OFDM0_TR_MUX_PAR 0x0c08
+
+#define REG_OFDM0_XA_RX_IQ_IMBALANCE 0x0c14
+#define REG_OFDM0_XB_RX_IQ_IMBALANCE 0x0c1c
+
+#define REG_OFDM0_ENERGY_CCA_THRES 0x0c4c
+
+#define REG_OFDM0_XA_AGC_CORE1 0x0c50
+#define REG_OFDM0_XA_AGC_CORE2 0x0c54
+#define REG_OFDM0_XB_AGC_CORE1 0x0c58
+#define REG_OFDM0_XB_AGC_CORE2 0x0c5c
+#define REG_OFDM0_XC_AGC_CORE1 0x0c60
+#define REG_OFDM0_XC_AGC_CORE2 0x0c64
+#define REG_OFDM0_XD_AGC_CORE1 0x0c68
+#define REG_OFDM0_XD_AGC_CORE2 0x0c6c
+#define OFDM0_X_AGC_CORE1_IGI_MASK 0x0000007F
+
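OFDM0_X_AGC_CORE1_IGI_MASK covers the low seven bits of each AGC CORE1 register, which hold the initial gain index that dynamic-gain code tunes. A read-modify-write sketch for path A (illustrative only, accessor names assumed):

    u32 val = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);

    /* Replace the current initial gain index with 'igi' */
    val &= ~OFDM0_X_AGC_CORE1_IGI_MASK;
    val |= igi & OFDM0_X_AGC_CORE1_IGI_MASK;
    rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val);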
+#define REG_OFDM0_AGC_PARM1 0x0c70
+
+#define REG_OFDM0_AGCR_SSI_TABLE 0x0c78
+
+#define REG_OFDM0_XA_TX_IQ_IMBALANCE 0x0c80
+#define REG_OFDM0_XB_TX_IQ_IMBALANCE 0x0c88
+#define REG_OFDM0_XC_TX_IQ_IMBALANCE 0x0c90
+#define REG_OFDM0_XD_TX_IQ_IMBALANCE 0x0c98
+
+#define REG_OFDM0_XC_TX_AFE 0x0c94
+#define REG_OFDM0_XD_TX_AFE 0x0c9c
+
+#define REG_OFDM0_RX_IQ_EXT_ANTA 0x0ca0
+
+#define REG_OFDM1_LSTF 0x0d00
+#define OFDM_LSTF_PRIME_CH_LOW BIT(10)
+#define OFDM_LSTF_PRIME_CH_HIGH BIT(11)
+#define OFDM_LSTF_PRIME_CH_MASK (OFDM_LSTF_PRIME_CH_LOW | \
+ OFDM_LSTF_PRIME_CH_HIGH)
+#define OFDM_LSTF_CONTINUE_TX BIT(28)
+#define OFDM_LSTF_SINGLE_CARRIER BIT(29)
+#define OFDM_LSTF_SINGLE_TONE BIT(30)
+#define OFDM_LSTF_MASK 0x70000000
+
+#define REG_OFDM1_TRX_PATH_ENABLE 0x0d04
+
+#define REG_TX_AGC_A_RATE18_06 0x0e00
+#define REG_TX_AGC_A_RATE54_24 0x0e04
+#define REG_TX_AGC_A_CCK1_MCS32 0x0e08
+#define REG_TX_AGC_A_MCS03_MCS00 0x0e10
+#define REG_TX_AGC_A_MCS07_MCS04 0x0e14
+#define REG_TX_AGC_A_MCS11_MCS08 0x0e18
+#define REG_TX_AGC_A_MCS15_MCS12 0x0e1c
+
+#define REG_FPGA0_IQK 0x0e28
+
+#define REG_TX_IQK_TONE_A 0x0e30
+#define REG_RX_IQK_TONE_A 0x0e34
+#define REG_TX_IQK_PI_A 0x0e38
+#define REG_RX_IQK_PI_A 0x0e3c
+
+#define REG_TX_IQK 0x0e40
+#define REG_RX_IQK 0x0e44
+#define REG_IQK_AGC_PTS 0x0e48
+#define REG_IQK_AGC_RSP 0x0e4c
+#define REG_TX_IQK_TONE_B 0x0e50
+#define REG_RX_IQK_TONE_B 0x0e54
+#define REG_TX_IQK_PI_B 0x0e58
+#define REG_RX_IQK_PI_B 0x0e5c
+#define REG_IQK_AGC_CONT 0x0e60
+
+#define REG_BLUETOOTH 0x0e6c
+#define REG_RX_WAIT_CCA 0x0e70
+#define REG_TX_CCK_RFON 0x0e74
+#define REG_TX_CCK_BBON 0x0e78
+#define REG_TX_OFDM_RFON 0x0e7c
+#define REG_TX_OFDM_BBON 0x0e80
+#define REG_TX_TO_RX 0x0e84
+#define REG_TX_TO_TX 0x0e88
+#define REG_RX_CCK 0x0e8c
+
+#define REG_TX_POWER_BEFORE_IQK_A 0x0e94
+#define REG_TX_POWER_AFTER_IQK_A 0x0e9c
+
+#define REG_RX_POWER_BEFORE_IQK_A 0x0ea0
+#define REG_RX_POWER_BEFORE_IQK_A_2 0x0ea4
+#define REG_RX_POWER_AFTER_IQK_A 0x0ea8
+#define REG_RX_POWER_AFTER_IQK_A_2 0x0eac
+
+#define REG_TX_POWER_BEFORE_IQK_B 0x0eb4
+#define REG_TX_POWER_AFTER_IQK_B 0x0ebc
+
+#define REG_RX_POWER_BEFORE_IQK_B 0x0ec0
+#define REG_RX_POWER_BEFORE_IQK_B_2 0x0ec4
+#define REG_RX_POWER_AFTER_IQK_B 0x0ec8
+#define REG_RX_POWER_AFTER_IQK_B_2 0x0ecc
+
+#define REG_RX_OFDM 0x0ed0
+#define REG_RX_WAIT_RIFS 0x0ed4
+#define REG_RX_TO_RX 0x0ed8
+#define REG_STANDBY 0x0edc
+#define REG_SLEEP 0x0ee0
+#define REG_PMPD_ANAEN 0x0eec
+
+#define REG_FW_START_ADDRESS 0x1000
+
+#define REG_USB_INFO 0xfe17
+#define REG_USB_HIMR 0xfe38
+#define USB_HIMR_TIMEOUT2 BIT(31)
+#define USB_HIMR_TIMEOUT1 BIT(30)
+#define USB_HIMR_PSTIMEOUT BIT(29)
+#define USB_HIMR_GTINT4 BIT(28)
+#define USB_HIMR_GTINT3 BIT(27)
+#define USB_HIMR_TXBCNERR BIT(26)
+#define USB_HIMR_TXBCNOK BIT(25)
+#define USB_HIMR_TSF_BIT32_TOGGLE BIT(24)
+#define USB_HIMR_BCNDMAINT3 BIT(23)
+#define USB_HIMR_BCNDMAINT2 BIT(22)
+#define USB_HIMR_BCNDMAINT1 BIT(21)
+#define USB_HIMR_BCNDMAINT0 BIT(20)
+#define USB_HIMR_BCNDOK3 BIT(19)
+#define USB_HIMR_BCNDOK2 BIT(18)
+#define USB_HIMR_BCNDOK1 BIT(17)
+#define USB_HIMR_BCNDOK0 BIT(16)
+#define USB_HIMR_HSISR_IND BIT(15)
+#define USB_HIMR_BCNDMAINT_E BIT(14)
+/* RSVD BIT(13) */
+#define USB_HIMR_CTW_END BIT(12)
+/* RSVD BIT(11) */
+#define USB_HIMR_C2HCMD BIT(10)
+#define USB_HIMR_CPWM2 BIT(9)
+#define USB_HIMR_CPWM BIT(8)
+#define USB_HIMR_HIGHDOK BIT(7) /* High Queue DMA OK
+ Interrupt */
+#define USB_HIMR_MGNTDOK BIT(6) /* Management Queue DMA OK
+ Interrupt */
+#define USB_HIMR_BKDOK BIT(5) /* AC_BK DMA OK Interrupt */
+#define USB_HIMR_BEDOK BIT(4) /* AC_BE DMA OK Interrupt */
+#define USB_HIMR_VIDOK BIT(3) /* AC_VI DMA OK Interrupt */
+#define USB_HIMR_VODOK BIT(2) /* AC_VO DMA OK Interrupt */
+#define USB_HIMR_RDU BIT(1) /* Receive Descriptor
+ Unavailable */
+#define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */
+
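REG_USB_HIMR is the host interrupt mask: set bits unmask the corresponding event. A sketch that enables only RX completion and RX-descriptor-unavailable reporting (again assuming the 32-bit accessor):

    rtl8xxxu_write32(priv, REG_USB_HIMR, USB_HIMR_ROK | USB_HIMR_RDU);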
+#define REG_USB_SPECIAL_OPTION 0xfe55
+#define REG_USB_DMA_AGG_TO 0xfe5b
+#define REG_USB_AGG_TO 0xfe5c
+#define REG_USB_AGG_TH 0xfe5d
+
+#define REG_NORMAL_SIE_VID 0xfe60 /* 0xfe60 - 0xfe61 */
+#define REG_NORMAL_SIE_PID 0xfe62 /* 0xfe62 - 0xfe63 */
+#define REG_NORMAL_SIE_OPTIONAL 0xfe64
+#define REG_NORMAL_SIE_EP 0xfe65 /* 0xfe65 - 0xfe67 */
+#define REG_NORMAL_SIE_EP_TX 0xfe66
+#define NORMAL_SIE_EP_TX_HIGH_MASK 0x000f
+#define NORMAL_SIE_EP_TX_NORMAL_MASK 0x00f0
+#define NORMAL_SIE_EP_TX_LOW_MASK 0x0f00
+
+#define REG_NORMAL_SIE_PHY 0xfe68 /* 0xfe68 - 0xfe6b */
+#define REG_NORMAL_SIE_OPTIONAL2 0xfe6c
+#define REG_NORMAL_SIE_GPS_EP 0xfe6d /* RTL8723 only */
+#define REG_NORMAL_SIE_MAC_ADDR 0xfe70 /* 0xfe70 - 0xfe75 */
+#define REG_NORMAL_SIE_STRING 0xfe80 /* 0xfe80 - 0xfedf */
+
+/* RF6052 registers */
+#define RF6052_REG_AC 0x00
+#define RF6052_REG_IQADJ_G1 0x01
+#define RF6052_REG_IQADJ_G2 0x02
+#define RF6052_REG_BS_PA_APSET_G1_G4 0x03
+#define RF6052_REG_BS_PA_APSET_G5_G8 0x04
+#define RF6052_REG_POW_TRSW 0x05
+#define RF6052_REG_GAIN_RX 0x06
+#define RF6052_REG_GAIN_TX 0x07
+#define RF6052_REG_TXM_IDAC 0x08
+#define RF6052_REG_IPA_G 0x09
+#define RF6052_REG_TXBIAS_G 0x0a
+#define RF6052_REG_TXPA_AG 0x0b
+#define RF6052_REG_IPA_A 0x0c
+#define RF6052_REG_TXBIAS_A 0x0d
+#define RF6052_REG_BS_PA_APSET_G9_G11 0x0e
+#define RF6052_REG_BS_IQGEN 0x0f
+#define RF6052_REG_MODE1 0x10
+#define RF6052_REG_MODE2 0x11
+#define RF6052_REG_RX_AGC_HP 0x12
+#define RF6052_REG_TX_AGC 0x13
+#define RF6052_REG_BIAS 0x14
+#define RF6052_REG_IPA 0x15
+#define RF6052_REG_TXBIAS 0x16
+#define RF6052_REG_POW_ABILITY 0x17
+#define RF6052_REG_MODE_AG 0x18 /* RF channel and BW switch */
+#define MODE_AG_CHANNEL_MASK 0x3ff
+#define MODE_AG_CHANNEL_20MHZ BIT(10)
+
+#define RF6052_REG_TOP 0x19
+#define RF6052_REG_RX_G1 0x1a
+#define RF6052_REG_RX_G2 0x1b
+#define RF6052_REG_RX_BB2 0x1c
+#define RF6052_REG_RX_BB1 0x1d
+#define RF6052_REG_RCK1 0x1e
+#define RF6052_REG_RCK2 0x1f
+#define RF6052_REG_TX_G1 0x20
+#define RF6052_REG_TX_G2 0x21
+#define RF6052_REG_TX_G3 0x22
+#define RF6052_REG_TX_BB1 0x23
+#define RF6052_REG_T_METER 0x24
+#define RF6052_REG_SYN_G1 0x25 /* RF TX Power control */
+#define RF6052_REG_SYN_G2 0x26 /* RF TX Power control */
+#define RF6052_REG_SYN_G3 0x27 /* RF TX Power control */
+#define RF6052_REG_SYN_G4 0x28 /* RF TX Power control */
+#define RF6052_REG_SYN_G5 0x29 /* RF TX Power control */
+#define RF6052_REG_SYN_G6 0x2a /* RF TX Power control */
+#define RF6052_REG_SYN_G7 0x2b /* RF TX Power control */
+#define RF6052_REG_SYN_G8 0x2c /* RF TX Power control */
+
+#define RF6052_REG_RCK_OS 0x30 /* RF TX PA control */
+
+#define RF6052_REG_TXPA_G1 0x31 /* RF TX PA control */
+#define RF6052_REG_TXPA_G2 0x32 /* RF TX PA control */
+#define RF6052_REG_TXPA_G3 0x33 /* RF TX PA control */
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig
similarity index 100%
rename from drivers/net/wireless/rtlwifi/Kconfig
rename to drivers/net/wireless/realtek/rtlwifi/Kconfig
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/Makefile
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/base.c
rename to drivers/net/wireless/realtek/rtlwifi/base.c
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/base.h
rename to drivers/net/wireless/realtek/rtlwifi/base.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/cam.c
rename to drivers/net/wireless/realtek/rtlwifi/cam.c
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/cam.h
rename to drivers/net/wireless/realtek/rtlwifi/cam.h
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/core.c
rename to drivers/net/wireless/realtek/rtlwifi/core.c
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/core.h
rename to drivers/net/wireless/realtek/rtlwifi/core.h
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/debug.c
rename to drivers/net/wireless/realtek/rtlwifi/debug.c
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/debug.h
rename to drivers/net/wireless/realtek/rtlwifi/debug.h
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/efuse.c
rename to drivers/net/wireless/realtek/rtlwifi/efuse.c
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/efuse.h
rename to drivers/net/wireless/realtek/rtlwifi/efuse.h
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pci.c
rename to drivers/net/wireless/realtek/rtlwifi/pci.c
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pci.h
rename to drivers/net/wireless/realtek/rtlwifi/pci.h
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/ps.c
rename to drivers/net/wireless/realtek/rtlwifi/ps.c
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/ps.h
rename to drivers/net/wireless/realtek/rtlwifi/ps.h
diff --git a/drivers/net/wireless/rtlwifi/pwrseqcmd.h b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pwrseqcmd.h
rename to drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rc.c
rename to drivers/net/wireless/realtek/rtlwifi/rc.c
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/realtek/rtlwifi/rc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rc.h
rename to drivers/net/wireless/realtek/rtlwifi/rc.h
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/regd.c
rename to drivers/net/wireless/realtek/rtlwifi/regd.c
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/realtek/rtlwifi/regd.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/regd.h
rename to drivers/net/wireless/realtek/rtlwifi/regd.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/main.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
similarity index 99%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 25db369..34ce064 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1946,6 +1946,14 @@
rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
mac->rx_data_filter = *(u16 *)val;
break;
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+ array[0] = 0xff;
+ array[1] = *((u8 *)val);
+ rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2,
+ array);
+ break;
+ }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/main.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/stats.c
rename to drivers/net/wireless/realtek/rtlwifi/stats.c
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/stats.h
rename to drivers/net/wireless/realtek/rtlwifi/stats.h
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/usb.c
rename to drivers/net/wireless/realtek/rtlwifi/usb.c
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/usb.h
rename to drivers/net/wireless/realtek/rtlwifi/usb.h
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/wifi.h
rename to drivers/net/wireless/realtek/rtlwifi/wifi.h
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 48a2cad..7e8bb11 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -266,7 +266,7 @@
if (beacon_diff > beacon_int)
beacon_diff = 0;
- autowake_timeout = (conf->max_sleep_period * beacon_int) - beacon_diff;
+ autowake_timeout = (conf->ps_dtim_period * beacon_int) - beacon_diff;
queue_delayed_work(rt2x00dev->workqueue,
&rt2x00dev->autowakeup_work,
autowake_timeout - 15);
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 7c355ff..ebed13a 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -350,7 +350,8 @@
cfg->bss_type = SCAN_BSS_TYPE_ANY;
/* currently NL80211 supports only a single interval */
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
- cfg->intervals[i] = cpu_to_le32(req->interval);
+ cfg->intervals[i] = cpu_to_le32(req->scan_plans[0].interval *
+ MSEC_PER_SEC);
cfg->ssid_len = 0;
ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index c938c49..bc15aa2 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -228,13 +228,15 @@
wl18xx_adjust_channels(cmd, cmd_channels);
if (c->num_short_intervals && c->long_interval &&
- c->long_interval > req->interval) {
- cmd->short_cycles_msec = cpu_to_le16(req->interval);
+ c->long_interval > req->scan_plans[0].interval * MSEC_PER_SEC) {
+ cmd->short_cycles_msec =
+ cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
cmd->short_cycles_count = c->num_short_intervals;
} else {
cmd->short_cycles_msec = 0;
- cmd->long_cycles_msec = cpu_to_le16(req->interval);
+ cmd->long_cycles_msec =
+ cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
cmd->short_cycles_count = 0;
}
wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
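Both wlcore hunks above reflect the same cfg80211 API change: the old req->interval was expressed in milliseconds, while the new per-plan interval is expressed in seconds, hence the MSEC_PER_SEC scaling. A minimal sketch of the shared conversion (the helper name is hypothetical; as the hunks show, wlcore consumes only scan_plans[0]):

	/* Sketch: scale the first scheduled-scan plan's interval from
	 * seconds to the milliseconds the wlcore firmware expects. */
	static u32 wlcore_sched_scan_interval_msec(struct cfg80211_sched_scan_request *req)
	{
		return req->scan_plans[0].interval * MSEC_PER_SEC;
	}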
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9bf63c2..441b158 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1706,19 +1706,19 @@
}
static int xennet_create_queues(struct netfront_info *info,
- unsigned int num_queues)
+ unsigned int *num_queues)
{
unsigned int i;
int ret;
- info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+ info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
GFP_KERNEL);
if (!info->queues)
return -ENOMEM;
rtnl_lock();
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < *num_queues; i++) {
struct netfront_queue *queue = &info->queues[i];
queue->id = i;
@@ -1728,7 +1728,7 @@
if (ret < 0) {
dev_warn(&info->netdev->dev,
"only created %d queues\n", i);
- num_queues = i;
+ *num_queues = i;
break;
}
@@ -1738,11 +1738,11 @@
napi_enable(&queue->napi);
}
- netif_set_real_num_tx_queues(info->netdev, num_queues);
+ netif_set_real_num_tx_queues(info->netdev, *num_queues);
rtnl_unlock();
- if (num_queues == 0) {
+ if (*num_queues == 0) {
dev_err(&info->netdev->dev, "no queues\n");
return -EINVAL;
}
@@ -1788,7 +1788,7 @@
if (info->queues)
xennet_destroy_queues(info);
- err = xennet_create_queues(info, num_queues);
+ err = xennet_create_queues(info, &num_queues);
if (err < 0)
goto destroy_ring;
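The xen-netfront change turns num_queues into an in/out parameter: when ring setup fails partway, xennet_create_queues() truncates the count it reports back, so the caller sizes the rest of the setup to the queues that actually exist. A self-contained sketch of the pattern (names are illustrative, not from the driver):

	#include <stdio.h>

	struct item { int id; };

	/* Stand-in for per-queue init; pretend resources run out after 3. */
	static int init_item(struct item *it, unsigned int id)
	{
		if (id >= 3)
			return -1;
		it->id = (int)id;
		return 0;
	}

	/* Mirrors the out-parameter pattern above: on partial failure the
	 * achieved count is written back through *num for the caller. */
	static int create_items(struct item *items, unsigned int *num)
	{
		unsigned int i;

		for (i = 0; i < *num; i++) {
			if (init_item(&items[i], i) < 0) {
				*num = i;	/* caller sees truncated count */
				break;
			}
		}
		return *num == 0 ? -1 : 0;
	}

	int main(void)
	{
		struct item items[8];
		unsigned int n = 8;

		if (create_items(items, &n) == 0)
			printf("created %u of 8 items\n", n);	/* prints 3 */
		return 0;
	}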
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e3a51b7..75718fa 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -194,7 +194,6 @@
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
-extern const struct bpf_func_proto bpf_perf_event_read_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dfaa7b3..82c159e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -237,6 +237,10 @@
#define KASAN_ABI_VERSION 3
#endif
+#if GCC_VERSION >= 50000
+#define CC_HAVE_BUILTIN_OVERFLOW
+#endif
+
#endif /* gcc version >= 40000 specific checks */
#if !defined(__noclone)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index dcfb2f4..452c0b0 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1932,6 +1932,8 @@
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
+ WLAN_CATEGORY_WNM = 10,
+ WLAN_CATEGORY_WNM_UNPROTECTED = 11,
WLAN_CATEGORY_TDLS = 12,
WLAN_CATEGORY_MESH_ACTION = 13,
WLAN_CATEGORY_MULTIHOP_ACTION = 14,
@@ -2396,7 +2398,10 @@
category = ((u8 *) hdr) + 24;
return *category != WLAN_CATEGORY_PUBLIC &&
*category != WLAN_CATEGORY_HT &&
+ *category != WLAN_CATEGORY_WNM_UNPROTECTED &&
*category != WLAN_CATEGORY_SELF_PROTECTED &&
+ *category != WLAN_CATEGORY_UNPROT_DMG &&
+ *category != WLAN_CATEGORY_VHT &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index ae5d0d2..f923d15 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -24,5 +24,6 @@
__u32 min_tx_rate;
__u32 max_tx_rate;
__u32 rss_query_en;
+ __u32 trusted;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b122eea..fa359c7 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -283,6 +283,13 @@
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_trigger_blink(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off) {}
+static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
static inline void led_trigger_set(struct led_classdev *led_cdev,
struct led_trigger *trigger) {}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 69fdd42..4ac653b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -881,6 +881,7 @@
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
+ * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
@@ -1054,6 +1055,10 @@
* This function is used to pass protocol port error state information
* to the switch driver. The switch driver can react to the proto_down
* by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
+ * This function is used to get egress tunnel information for given skb.
+ * This is useful for retrieving outer tunnel header parameters while
+ * sampling packet.
*
*/
struct net_device_ops {
@@ -1109,6 +1114,8 @@
int max_tx_rate);
int (*ndo_set_vf_spoofchk)(struct net_device *dev,
int vf, bool setting);
+ int (*ndo_set_vf_trust)(struct net_device *dev,
+ int vf, bool setting);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
@@ -1227,6 +1234,8 @@
int (*ndo_get_iflink)(const struct net_device *dev);
int (*ndo_change_proto_down)(struct net_device *dev,
bool proto_down);
+ int (*ndo_fill_metadata_dst)(struct net_device *dev,
+ struct sk_buff *skb);
};
/**
@@ -2207,6 +2216,7 @@
void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
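dev_fill_metadata_dst() is the core-stack entry point for the new ndo_fill_metadata_dst hook declared above. A minimal sketch, assuming the usual ndo dispatch convention (the in-tree net/core implementation may differ in detail):

	int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		/* Drivers that cannot describe their egress tunnel header
		 * simply leave the hook unset. */
		if (!ops->ndo_fill_metadata_dst)
			return -EOPNOTSUPP;

		return ops->ndo_fill_metadata_dst(dev, skb);
	}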
diff --git a/include/linux/overflow-arith.h b/include/linux/overflow-arith.h
new file mode 100644
index 0000000..e12ccf8
--- /dev/null
+++ b/include/linux/overflow-arith.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <linux/kernel.h>
+
+#ifdef CC_HAVE_BUILTIN_OVERFLOW
+
+#define overflow_usub __builtin_usub_overflow
+
+#else
+
+static inline bool overflow_usub(unsigned int a, unsigned int b,
+ unsigned int *res)
+{
+ *res = a - b;
+ return *res > a;
+}
+
+#endif
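The fallback relies on defined unsigned wraparound: if a - b wraps, the result is larger than a, which signals underflow. A self-contained userspace demonstration (the MTU framing is illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	/* Same fallback as the header above, repeated so this stands alone. */
	static inline bool overflow_usub(unsigned int a, unsigned int b,
					 unsigned int *res)
	{
		*res = a - b;
		return *res > a;
	}

	int main(void)
	{
		unsigned int payload;

		if (overflow_usub(100, 140, &payload))
			printf("underflow: headers exceed the MTU\n");

		if (!overflow_usub(1500, 40, &payload))
			printf("payload room: %u bytes\n", payload);	/* 1460 */
		return 0;
	}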
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 4c477e6..05fde31 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -213,7 +213,9 @@
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
#define PHY_INTERRUPT_DISABLED 0x0
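The new _nested accessors presumably exist for stacked buses (for example an MDIO mux hanging off another MDIO device), where a second mdio_lock is taken while the outer one is held; a plain mutex_lock() there would trip lockdep. A sketch of what the nested read plausibly looks like, mirroring mdiobus_read() but with a nesting annotation:

	int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
	{
		int retval;

		/* Annotated so lockdep tolerates mdio_lock-inside-mdio_lock. */
		mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
		retval = bus->read(bus, addr, regnum);
		mutex_unlock(&bus->mdio_lock);

		return retval;
	}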
diff --git a/include/linux/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
similarity index 100%
rename from include/linux/mdio-gpio.h
rename to include/linux/platform_data/mdio-gpio.h
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
new file mode 100644
index 0000000..6a43476
--- /dev/null
+++ b/include/linux/qed/common_hsi.h
@@ -0,0 +1,607 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 4
+#define FW_REVISION_VERSION 2
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_BB (256)
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2 (4)
+#define NUM_OF_PHYS_TCS (8)
+
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO (8)
+
+#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2 512
+#define MAX_QM_TX_QUEUES_BB 448
+#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE 0x200
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH 16
+#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH 24
+#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH 32
+#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH 32
+#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX 0
+#define CAU_FSM_ETH_TX 1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB 12
+
+#define CAU_HC_STOPPED_STATE 3
+#define CAU_HC_DISABLE_STATE 4
+#define CAU_HC_ENABLE_STATE 0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_TOT_SB_PER_PATH \
+ MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE 0x0600
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS 12
+#define PXP_PER_PF_ENTRY_SIZE 8
+#define PXP_NUM_GLOBAL_WINDOWS 243
+#define PXP_GLOBAL_ENTRY_SIZE 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_PF_WINDOW_ADMIN_START 0
+#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
+#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
+ PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
+ PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+ (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+ PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
+#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+ PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE */
+struct async_data {
+ __le32 cid;
+ __le16 itid;
+ u8 error_code;
+ u8 fw_debug_param;
+};
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Event Data Union */
+union event_ring_data {
+ u8 bytes[8];
+ struct async_data async_info;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ __le16 reserved0;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
+/* Multi function mode */
+enum mf_mode {
+ SF,
+ MF_OVLAN,
+ MF_NPAR,
+ MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+ PROTOCOLID_RESERVED1,
+ PROTOCOLID_RESERVED2,
+ PROTOCOLID_RESERVED3,
+ PROTOCOLID_CORE,
+ PROTOCOLID_ETH,
+ PROTOCOLID_RESERVED4,
+ PROTOCOLID_RESERVED5,
+ PROTOCOLID_PREROCE,
+ PROTOCOLID_COMMON,
+ PROTOCOLID_RESERVED6,
+ MAX_PROTOCOL_TYPE
+};
+
+/* status block structure */
+struct cau_pi_entry {
+ u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+ u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
+};
+
+/* core doorbell data */
+struct core_db_data {
+ u8 params;
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+ DB_AGG_CMD_NOP,
+ DB_AGG_CMD_SET,
+ DB_AGG_CMD_ADD,
+ DB_AGG_CMD_MAX,
+ MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+ DB_DEST_XCM,
+ DB_DEST_UCM,
+ DB_DEST_TCM,
+ DB_NUM_DESTINATIONS,
+ MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+ __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
+};
+
+/* IGU interrupt command */
+enum igu_int_cmd {
+ IGU_INT_ENABLE = 0,
+ IGU_INT_DISABLE = 1,
+ IGU_INT_NOP = 2,
+ IGU_INT_NOP2 = 3,
+ MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+ u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ u32 reserved1;
+};
+
+/* IGU segment access, for the default status block only */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_REG = 0,
+ IGU_SEG_ACCESS_ATTN = 1,
+ MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+ __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+ __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
+};
+
+struct pxp_pretend_concrete_fid {
+ __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+};
+
+union pxp_pretend_fid {
+ struct pxp_pretend_concrete_fid concrete_fid;
+ __le16 opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
+ __le32 sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+};
+
+#endif /* __COMMON_HSI__ */
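Every register-facing struct in this header encodes its fields as MASK/SHIFT pairs, which the driver side pairs with generic field accessors. A self-contained sketch of that pattern (GET_FIELD/SET_FIELD here are illustrative, written against the cau_sb_entry definitions above):

	#include <stdio.h>

	#define CAU_SB_ENTRY_PF_NUMBER_MASK	0xF
	#define CAU_SB_ENTRY_PF_NUMBER_SHIFT	27

	#define GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & (name ## _MASK))

	#define SET_FIELD(value, name, field)					\
		do {								\
			(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
			(value) |= (((field) & (name ## _MASK))			\
				    << (name ## _SHIFT));			\
		} while (0)

	int main(void)
	{
		unsigned int params = 0;

		SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, 5);
		printf("pf number = %u\n",
		       GET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER));	/* 5 */
		return 0;
	}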
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
new file mode 100644
index 0000000..320b337
--- /dev/null
+++ b/include/linux/qed/eth_common.h
@@ -0,0 +1,279 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE 64
+
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+#define ETH_RX_NUM_NEXT_PAGE_SGES 2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+
+#define ETH_REG_CQE_PBL_SIZE 3
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+
+/* ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_RSS_ENGINE_NUM_K2 207
+#define ETH_RSS_ENGINE_NUM_BB 127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_SGL_SIZE 3
+#define ETH_TPA_CQE_CONT_SGL_SIZE 6
+#define ETH_TPA_CQE_END_SGL_SIZE 4
+
+/* Queue Zone sizes */
+#define TSTORM_QZONE_SIZE 0
+#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
+#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
+#define XSTORM_QZONE_SIZE 0
+#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
+#define PSTORM_QZONE_SIZE 0
+
+/* Interrupt coalescing TimeSet */
+struct coalescing_timeset {
+ u8 timeset;
+ u8 valid;
+};
+
+struct eth_tx_1st_bd_flags {
+ u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+};
+
+/* The parsing information data for the first tx bd of a given packet. */
+struct eth_tx_data_1st_bd {
+ __le16 vlan;
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 fw_use_only;
+};
+
+/* The parsing information data for the second tx bd of a given packet. */
+struct eth_tx_data_2nd_bd {
+ __le16 tunn_ip_size;
+ __le16 bitfields;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+ __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15
+};
+
+/* Regular ETH Rx FP CQE. */
+struct eth_fast_path_rx_reg_cqe {
+ u8 type;
+ u8 bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+ __le16 pkt_len;
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_bd;
+ u8 placement_offset;
+ u8 reserved;
+ __le16 pbl[ETH_REG_CQE_PBL_SIZE];
+ u8 reserved1[10];
+};
+
+/* The L4 pseudo checksum mode for Ethernet */
+enum eth_l4_pseudo_checksum_mode {
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+ struct regpair addr;
+};
+
+/* regular ETH Rx SP CQE */
+struct eth_slow_path_rx_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[27];
+ __le16 echo;
+};
+
+/* union for all ETH Rx CQE types */
+union eth_rx_cqe {
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_slow_path_rx_cqe slow_path;
+};
+
+/* ETH Rx CQE type */
+enum eth_rx_cqe_type {
+ ETH_RX_CQE_TYPE_UNUSED,
+ ETH_RX_CQE_TYPE_REGULAR,
+ ETH_RX_CQE_TYPE_SLOW_PATH,
+ MAX_ETH_RX_CQE_TYPE
+};
+
+/* ETH Rx producers data */
+struct eth_rx_prod_data {
+ __le16 bd_prod;
+ __le16 sge_prod;
+ __le16 cqe_prod;
+ __le16 reserved;
+};
+
+/* The first tx bd of a given packet */
+struct eth_tx_1st_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_1st_bd data;
+};
+
+/* The second tx bd of a given packet */
+struct eth_tx_2nd_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_2nd_bd data;
+};
+
+/* The parsing information data for the third tx bd of a given packet. */
+struct eth_tx_data_3rd_bd {
+ __le16 lso_mss;
+ u8 bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+ u8 resereved0[3];
+};
+
+/* The third tx bd of a given packet */
+struct eth_tx_3rd_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_3rd_bd data;
+};
+
+/* The common non-special TX BD ring element */
+struct eth_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+union eth_tx_bd_types {
+ struct eth_tx_1st_bd first_bd;
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_bd reg_bd;
+};
+
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+ struct eth_rx_prod_data rx_producers;
+ __le32 reserved[2];
+};
+
+/* Ustorm Queue Zone */
+struct ustorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ __le16 reserved[3];
+};
+
+/* Ystorm Queue Zone */
+struct ystorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ __le16 reserved[3];
+};
+
+/* ETH doorbell data */
+struct eth_db_data {
+ u8 params;
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
new file mode 100644
index 0000000..b920c36
--- /dev/null
+++ b/include/linux/qed/qed_chain.h
@@ -0,0 +1,539 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CHAIN_H
+#define _QED_CHAIN_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
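As a worked illustration of the HILO_* helpers above, this standalone C
sketch mirrors HILO_GEN() outside the kernel (the values are arbitrary,
uint32_t/uint64_t stand in for the kernel's u32/u64, and no endianness
conversion is modelled):

#include <stdint.h>
#include <stdio.h>

/* Standalone mirror of HILO_GEN(): widen 'hi', shift it up, add 'lo' */
#define HILO_GEN(hi, lo, type)	((((type)(hi)) << 32) + (lo))

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0x23456789;	/* e.g. a regpair */
	uint64_t addr = HILO_GEN(hi, lo, uint64_t);

	printf("0x%llx\n", (unsigned long long)addr);	/* 0x123456789 */
	return 0;
}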
+
+enum qed_chain_mode {
+ /* Each Page contains a next pointer at its end */
+ QED_CHAIN_MODE_NEXT_PTR,
+
+	/* Chain is a single page; a next pointer is not required */
+ QED_CHAIN_MODE_SINGLE,
+
+ /* Page pointers are located in a side list */
+ QED_CHAIN_MODE_PBL,
+};
+
+enum qed_chain_use_mode {
+ QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+};
+
+struct qed_chain_next {
+ struct regpair next_phys;
+ void *next_virt;
+};
+
+struct qed_chain_pbl {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct qed_chain {
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+ void *p_prod_elem;
+ void *p_cons_elem;
+ u16 page_cnt;
+ enum qed_chain_mode mode;
+ enum qed_chain_use_mode intended_use; /* used to produce/consume */
+	u16 capacity; /* number of _usable_ elements */
+ u16 size; /* number of elements */
+ u16 prod_idx;
+ u16 cons_idx;
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_unusable;
+ u16 usable_per_page;
+ u16 elem_size;
+ u16 next_page_mask;
+ struct qed_chain_pbl pbl;
+};
+
+#define QED_CHAIN_PBL_ENTRY_SIZE (8)
+#define QED_CHAIN_PAGE_SIZE (0x1000)
+#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (1 + ((sizeof(struct qed_chain_next) - 1) / \
+ (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((u32)(ELEMS_PER_PAGE(elem_size) - \
+ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
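To make the geometry macros above concrete, assume 8-byte elements in
QED_CHAIN_MODE_NEXT_PTR on a 64-bit host, where struct qed_chain_next
occupies 16 bytes (one regpair plus one pointer). This standalone sketch
mirrors the arithmetic; the sizes are assumptions for illustration only:

#include <stdio.h>

#define PAGE_SZ			0x1000	/* QED_CHAIN_PAGE_SIZE */
#define NEXT_SIZE		16	/* sizeof(struct qed_chain_next) on 64-bit */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int elem_size = 8, elem_cnt = 1024;
	unsigned int per_page = PAGE_SZ / elem_size;		  /* 512 */
	unsigned int unusable = 1 + (NEXT_SIZE - 1) / elem_size;  /* 2 */
	unsigned int usable = per_page - unusable;		  /* 510 */
	unsigned int pages = DIV_ROUND_UP(elem_cnt, usable);	  /* 3 */

	printf("%u elems/page, %u usable, %u pages\n",
	       per_page, usable, pages);
	return 0;
}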
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+ return p_chain->prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+ return p_chain->cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+ u16 used;
+
+	/* we don't need to truncate upon assignment, as we assign u32 -> u16 */
+ used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
+ (u32)p_chain->cons_idx;
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (used / p_chain->elem_per_page);
+
+ return p_chain->capacity - used;
+}
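The "used" computation above relies on unsigned 16-bit wraparound:
adding 0x10000 before subtracting keeps the count correct even after
prod_idx wraps past cons_idx. A standalone sketch of the arithmetic,
with hypothetical index values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t prod = 0x0005, cons = 0xfffb;	/* producer has wrapped */
	uint16_t used = (uint16_t)((0x10000u + (uint32_t)prod) -
				   (uint32_t)cons);

	printf("used = %u\n", used);		/* 10 */
	return 0;
}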
+
+static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+{
+ return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
+}
+
+static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
+{
+ return qed_chain_get_elem_left(p_chain) == 0;
+}
+
+static inline u16 qed_chain_get_elem_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->elem_per_page;
+}
+
+static inline u16 qed_chain_get_usable_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->elem_unusable;
+}
+
+static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+{
+ return p_chain->size;
+}
+
+static inline dma_addr_t
+qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+ return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+ void **p_next_elem,
+ u16 *idx_to_inc,
+ u16 *page_to_inc)
+
+{
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ {
+ struct qed_chain_next *p_next = *p_next_elem;
+ *p_next_elem = p_next->next_virt;
+ *idx_to_inc += p_chain->elem_unusable;
+ break;
+ }
+ case QED_CHAIN_MODE_SINGLE:
+ *p_next_elem = p_chain->p_virt_addr;
+ break;
+
+ case QED_CHAIN_MODE_PBL:
+		/* It is assumed pages are sequential; the next element needs
+		 * to change only when wrapping from the last page back to
+		 * the first.
+		 */
+ if (++(*page_to_inc) == p_chain->page_cnt) {
+ *page_to_inc = 0;
+ *p_next_elem = p_chain->p_virt_addr;
+ }
+ }
+}
+
+#define is_unusable_idx(p, idx) \
+ (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_ans_skip(p, idx) \
+ do { \
+ if (is_unusable_idx(p, idx)) { \
+ (p)->idx += (p)->elem_unusable; \
+ } \
+ } while (0)
+
+/**
+ * @brief qed_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static inline void
+qed_chain_return_multi_produced(struct qed_chain *p_chain,
+ u16 num)
+{
+ p_chain->cons_idx += num;
+ test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+ p_chain->cons_idx++;
+ test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the
+ * driver's responsibility to validate that the chain has room for a
+ * new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+ void *ret = NULL;
+
+ if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
+ p_chain->next_page_mask) {
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ &p_chain->prod_idx,
+ &p_chain->pbl.prod_page_idx);
+ }
+
+ ret = p_chain->p_prod_elem;
+ p_chain->prod_idx++;
+ p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+ p_chain->elem_size);
+
+ return ret;
+}
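A hedged usage sketch of the producer side in driver context:
qed_chain_produce() itself does not check for room, so the caller does.
The function name my_tx_one and the exact BD fields filled here are
illustrative, not part of the API:

static int my_tx_one(struct qed_chain *tx_chain, dma_addr_t buf, u16 len)
{
	struct eth_tx_1st_bd *bd;

	if (qed_chain_is_full(tx_chain))
		return -EBUSY;

	bd = qed_chain_produce(tx_chain);
	memset(bd, 0, sizeof(*bd));
	bd->addr.lo = DMA_LO_LE(buf);
	bd->addr.hi = DMA_HI_LE(buf);
	bd->nbytes = cpu_to_le16(len);
	return 0;
}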
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of usable BDs in the chain
+ *
+ * @param p_chain
+ *
+ * @return u16, the chain capacity in elements
+ */
+static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+ return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static inline void
+qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+ test_ans_skip(p_chain, prod_idx);
+ p_chain->prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+ void *ret = NULL;
+
+ if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
+ p_chain->next_page_mask) {
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ &p_chain->cons_idx,
+ &p_chain->pbl.cons_page_idx);
+ }
+
+ ret = p_chain->p_cons_elem;
+ p_chain->cons_idx++;
+ p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+ p_chain->elem_size);
+
+ return ret;
+}
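And the matching consumer side, as a hedged sketch: drain completion
entries the firmware has produced until the driver's consumer index
catches up with the hardware producer (my_poll_cqes and hw_prod are
hypothetical names):

static void my_poll_cqes(struct qed_chain *cqe_chain, u16 hw_prod)
{
	while (qed_chain_get_cons_idx(cqe_chain) != hw_prod) {
		union eth_rx_cqe *cqe = qed_chain_consume(cqe_chain);

		/* ... dispatch on cqe->fast_path_regular / slow_path ... */
		(void)cqe;
	}
}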
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+ int i;
+
+ p_chain->prod_idx = 0;
+ p_chain->cons_idx = 0;
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
+ p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
+ }
+
+ switch (p_chain->intended_use) {
+ case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case QED_CHAIN_USE_TO_PRODUCE:
+ /* Do nothing */
+ break;
+
+ case QED_CHAIN_USE_TO_CONSUME:
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
+ qed_chain_recycle_consumed(p_chain);
+ break;
+ }
+}
+
+/**
+ * @brief qed_chain_init - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init(struct qed_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr,
+ u16 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode)
+{
+ /* chain fixed parameters */
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+ p_chain->elem_size = elem_size;
+ p_chain->page_cnt = page_cnt;
+ p_chain->mode = mode;
+
+ p_chain->intended_use = intended_use;
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->usable_per_page =
+ USABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+
+ p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+
+ p_chain->next_page_mask = (p_chain->usable_per_page &
+ p_chain->elem_per_page_mask);
+
+ if (mode == QED_CHAIN_MODE_NEXT_PTR) {
+ struct qed_chain_next *p_next;
+ u16 i;
+
+ for (i = 0; i < page_cnt - 1; i++) {
+			/* Increment p_phys_addr to the next page. */
+ p_phys_addr += QED_CHAIN_PAGE_SIZE;
+
+ /* Initialize the physical address of the next page. */
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ elem_size *
+ p_chain->
+ usable_per_page);
+
+ p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
+ p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
+
+ /* Initialize the virtual address of the next page. */
+ p_next->next_virt = (void *)((u8 *)p_virt_addr +
+ QED_CHAIN_PAGE_SIZE);
+
+ /* Move to the next page. */
+ p_virt_addr = p_next->next_virt;
+ }
+
+ /* Last page's next should point to beginning of the chain */
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ elem_size *
+ p_chain->usable_per_page);
+
+ p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
+ p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
+ p_next->next_virt = p_chain->p_virt_addr;
+ }
+ qed_chain_reset(p_chain);
+}
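A minimal allocation-plus-init sketch, assuming a single-page chain of
8-byte elements that the driver produces into; the helper name and the
use of dma_alloc_coherent() for the backing page are illustrative:

static int my_alloc_chain(struct pci_dev *pdev, struct qed_chain *chain)
{
	dma_addr_t p_phys;
	void *p_virt;

	p_virt = dma_alloc_coherent(&pdev->dev, QED_CHAIN_PAGE_SIZE,
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;

	qed_chain_init(chain, p_virt, p_phys, 1 /* page_cnt */,
		       8 /* elem_size */, QED_CHAIN_USE_TO_PRODUCE,
		       QED_CHAIN_MODE_SINGLE);
	return 0;
}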
+
+/**
+ * @brief qed_chain_pbl_init - Initializes a basic PBL chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param use_mode
+ * @param p_phys_pbl pointer to a pre-allocated side table
+ * which will hold physical page addresses.
+ * @param p_virt_pbl pointer to a pre-allocated side table
+ * which will hold virtual page addresses.
+ */
+static inline void
+qed_chain_pbl_init(struct qed_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr,
+ u16 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode use_mode,
+ dma_addr_t p_phys_pbl,
+ dma_addr_t *p_virt_pbl)
+{
+ dma_addr_t *p_pbl_dma = p_virt_pbl;
+ int i;
+
+ qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
+ elem_size, use_mode, QED_CHAIN_MODE_PBL);
+
+ p_chain->pbl.p_phys_table = p_phys_pbl;
+ p_chain->pbl.p_virt_table = p_virt_pbl;
+
+	/* Fill the PBL with physical addresses */
+ for (i = 0; i < page_cnt; i++) {
+ *p_pbl_dma = p_phys_addr;
+ p_phys_addr += QED_CHAIN_PAGE_SIZE;
+ p_pbl_dma++;
+ }
+}
+
+/**
+ * @brief qed_chain_set_prod - sets the producer to the given value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ u16 prod_idx,
+ void *p_prod_elem)
+{
+ p_chain->prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_sge_get_elem -
+ *
+ * Get a pointer to the element at an absolute index
+ *
+ * @param p_chain
+ * @param idx
+ * @assumption p_chain->size is a power of 2
+ *
+ * @return void*, a pointer to the element at idx, or NULL if idx is
+ * out of range
+ */
+static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
+ u16 idx)
+{
+ void *ret = NULL;
+
+ if (idx >= p_chain->size)
+ return NULL;
+
+ ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+
+ return ret;
+}
+
+/**
+ * @brief qed_chain_sge_inc_cons_prod
+ *
+ * For SGE chains the producer is not advanced serially; the ring is
+ * expected to be full at all times. Once elements are consumed, they
+ * are immediately produced.
+ *
+ * @param p_chain
+ * @param cnt
+ */
+static inline void
+qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
+ u16 cnt)
+{
+ p_chain->prod_idx += cnt;
+ p_chain->cons_idx += cnt;
+}
+
+#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
new file mode 100644
index 0000000..81ab178
--- /dev/null
+++ b/include/linux/qed/qed_eth_if.h
@@ -0,0 +1,165 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ETH_IF_H
+#define _QED_ETH_IF_H
+
+#include <linux/list.h>
+#include <linux/if_link.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ u8 num_queues;
+ u8 num_tc;
+
+ u8 port_mac[ETH_ALEN];
+ u8 num_vlan_filters;
+};
+
+struct qed_update_vport_rss_params {
+ u16 rss_ind_table[128];
+ u32 rss_key[10];
+};
+
+struct qed_update_vport_params {
+ u8 vport_id;
+ u8 update_vport_active_flg;
+ u8 vport_active_flg;
+ u8 update_rss_flg;
+ struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_stop_rxq_params {
+ u8 rss_id;
+ u8 rx_queue_id;
+ u8 vport_id;
+ bool eq_completion_only;
+};
+
+struct qed_stop_txq_params {
+ u8 rss_id;
+ u8 tx_queue_id;
+};
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+ QED_FILTER_XCAST_TYPE_ADD,
+ QED_FILTER_XCAST_TYPE_DEL,
+ QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+struct qed_filter_ucast_params {
+ enum qed_filter_xcast_params_type type;
+ u8 vlan_valid;
+ u16 vlan;
+ u8 mac_valid;
+ unsigned char mac[ETH_ALEN];
+};
+
+struct qed_filter_mcast_params {
+ enum qed_filter_xcast_params_type type;
+ u8 num;
+ unsigned char mac[64][ETH_ALEN];
+};
+
+union qed_filter_type_params {
+ enum qed_filter_rx_mode_type accept_flags;
+ struct qed_filter_ucast_params ucast;
+ struct qed_filter_mcast_params mcast;
+};
+
+enum qed_filter_type {
+ QED_FILTER_TYPE_UCAST,
+ QED_FILTER_TYPE_MCAST,
+ QED_FILTER_TYPE_RX_MODE,
+ QED_MAX_FILTER_TYPES,
+};
+
+struct qed_filter_params {
+ enum qed_filter_type type;
+ union qed_filter_type_params filter;
+};
+
+struct qed_queue_start_common_params {
+ u8 rss_id;
+ u8 queue_id;
+ u8 vport_id;
+ u16 sb;
+ u16 sb_idx;
+};
+
+struct qed_eth_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops,
+ void *cookie);
+
+ int (*vport_start)(struct qed_dev *cdev,
+ u8 vport_id, u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg);
+
+ int (*vport_stop)(struct qed_dev *cdev,
+ u8 vport_id);
+
+ int (*vport_update)(struct qed_dev *cdev,
+ struct qed_update_vport_params *params);
+
+ int (*q_rx_start)(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod);
+
+ int (*q_rx_stop)(struct qed_dev *cdev,
+ struct qed_stop_rxq_params *params);
+
+ int (*q_tx_start)(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell);
+
+ int (*q_tx_stop)(struct qed_dev *cdev,
+ struct qed_stop_txq_params *params);
+
+ int (*filter_config)(struct qed_dev *cdev,
+ struct qed_filter_params *params);
+
+ int (*fastpath_stop)(struct qed_dev *cdev);
+
+ int (*eth_cqe_completion)(struct qed_dev *cdev,
+ u8 rss_id,
+ struct eth_slow_path_rx_cqe *cqe);
+
+ void (*get_vport_stats)(struct qed_dev *cdev,
+ struct qed_eth_stats *stats);
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+void qed_put_eth_ops(void);
+
+#endif
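A hedged sketch of how a protocol driver would bind to this interface;
the version constant QEDE_ETH_INTERFACE_VERSION is a placeholder, not
something defined in this header:

static const struct qed_eth_ops *qed_ops;

static int my_module_init(void)
{
	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
	if (!qed_ops)
		return -ENODEV;	/* qed core absent or version mismatch */
	return 0;
}

static void my_module_exit(void)
{
	qed_put_eth_ops();
}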
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
new file mode 100644
index 0000000..dc9a1353
--- /dev/null
+++ b/include/linux/qed/qed_if.h
@@ -0,0 +1,498 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IF_H
+#define _QED_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_chain.h>
+
+#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+ (void __iomem *)(reg_addr))
+
+#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+
+#define QED_COALESCE_MAX 0xFF
+
+/* forward */
+struct qed_dev;
+
+struct qed_eth_pf_params {
+ /* The following parameters are used during HW-init
+	 * and need to be passed as arguments to the update_pf_params
+	 * routine invoked before slowpath start
+ */
+ u16 num_cons;
+};
+
+struct qed_pf_params {
+ struct qed_eth_pf_params eth_pf_params;
+};
+
+enum qed_int_mode {
+ QED_INT_MODE_INTA,
+ QED_INT_MODE_MSIX,
+ QED_INT_MODE_MSI,
+ QED_INT_MODE_POLL,
+};
+
+struct qed_sb_info {
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void __iomem *igu_addr;
+ u8 flags;
+#define QED_SB_INFO_INIT 0x1
+#define QED_SB_INFO_SETUP 0x2
+
+ struct qed_dev *cdev;
+};
+
+struct qed_dev_info {
+ unsigned long pci_mem_start;
+ unsigned long pci_mem_end;
+ unsigned int pci_irq;
+ u8 num_hwfns;
+
+ u8 hw_mac[ETH_ALEN];
+ bool is_mf;
+
+ /* FW version */
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ /* MFW version */
+ u32 mfw_rev;
+
+ u32 flash_size;
+ u8 mf_mode;
+};
+
+enum qed_sb_type {
+ QED_SB_TYPE_L2_QUEUE,
+};
+
+enum qed_protocol {
+ QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+ bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+ u32 override_flags;
+ bool autoneg;
+ u32 adv_speeds;
+ u32 forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
+ u32 pause_config;
+};
+
+struct qed_link_output {
+ bool link_up;
+
+ u32 supported_caps; /* In SUPPORTED defs */
+ u32 advertised_caps; /* In ADVERTISED defs */
+ u32 lp_caps; /* In ADVERTISED defs */
+ u32 speed; /* In Mb/s */
+ u8 duplex; /* In DUPLEX defs */
+ u8 port; /* In PORT defs */
+ bool autoneg;
+ u32 pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 12
+struct qed_slowpath_params {
+ u32 int_mode;
+ u8 drv_major;
+ u8 drv_minor;
+ u8 drv_rev;
+ u8 drv_eng;
+ u8 name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_int_info {
+ struct msix_entry *msix;
+ u8 msix_cnt;
+
+ /* This should be updated by the protocol driver */
+ u8 used_cnt;
+};
+
+struct qed_common_cb_ops {
+ void (*link_update)(void *dev,
+ struct qed_link_output *link);
+};
+
+struct qed_common_ops {
+ struct qed_dev* (*probe)(struct pci_dev *dev,
+ enum qed_protocol protocol,
+ u32 dp_module, u8 dp_level);
+
+ void (*remove)(struct qed_dev *cdev);
+
+ int (*set_power_state)(struct qed_dev *cdev,
+ pci_power_t state);
+
+ void (*set_id)(struct qed_dev *cdev,
+ char name[],
+ char ver_str[]);
+
+ /* Client drivers need to make this call before slowpath_start.
+	 * The PF params required for the call before slowpath_start are
+	 * documented within the qed_pf_params structure definition.
+ */
+ void (*update_pf_params)(struct qed_dev *cdev,
+ struct qed_pf_params *params);
+ int (*slowpath_start)(struct qed_dev *cdev,
+ struct qed_slowpath_params *params);
+
+ int (*slowpath_stop)(struct qed_dev *cdev);
+
+ /* Requests to use `cnt' interrupts for fastpath.
+	 * Upon success, returns the number of interrupts allocated for fastpath.
+ */
+ int (*set_fp_int)(struct qed_dev *cdev,
+ u16 cnt);
+
+ /* Fills `info' with pointers required for utilizing interrupts */
+ int (*get_fp_int)(struct qed_dev *cdev,
+ struct qed_int_info *info);
+
+ u32 (*sb_init)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ u32 (*sb_release)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+ void (*simd_handler_config)(struct qed_dev *cdev,
+ void *token,
+ int index,
+ void (*handler)(void *));
+
+ void (*simd_handler_clean)(struct qed_dev *cdev,
+ int index);
+/**
+ * @brief set_link - set links according to params
+ *
+ * @param cdev
+ * @param params - values used to override the default link configuration
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*set_link)(struct qed_dev *cdev,
+ struct qed_link_params *params);
+
+/**
+ * @brief get_link - returns the current link state.
+ *
+ * @param cdev
+ * @param if_link - structure to be filled with current link configuration.
+ */
+ void (*get_link)(struct qed_dev *cdev,
+ struct qed_link_output *if_link);
+
+/**
+ * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ *
+ * @param cdev
+ */
+ int (*drain)(struct qed_dev *cdev);
+
+/**
+ * @brief update_msglvl - update module debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+ void (*update_msglvl)(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+ int (*chain_alloc)(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain);
+
+ void (*chain_free)(struct qed_dev *cdev,
+ struct qed_chain *p_chain);
+};
+
+/**
+ * @brief qed_get_protocol_version
+ *
+ * @param protocol
+ *
+ * @return version supported by qed for given protocol driver
+ */
+u32 qed_get_protocol_version(enum qed_protocol protocol);
+
+#define MASK_FIELD(_name, _value) \
+ ((_value) &= (_name ## _MASK))
+
+#define FIELD_VALUE(_name, _value) \
+ ((_value & _name ## _MASK) << _name ## _SHIFT)
+
+#define SET_FIELD(value, name, flag) \
+ do { \
+ (value) &= ~(name ## _MASK << name ## _SHIFT); \
+ (value) |= (((u64)flag) << (name ## _SHIFT)); \
+ } while (0)
+
+#define GET_FIELD(value, name) \
+ (((value) >> (name ## _SHIFT)) & name ## _MASK)
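SET_FIELD()/GET_FIELD() pair naturally with the _MASK/_SHIFT constants
from the HSI headers above. This standalone sketch mirrors both macros
against the 3rd Tx BD bitfield, with stdint types standing in for the
kernel's:

#include <stdint.h>
#include <stdio.h>

#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK		0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT	4

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~(name ## _MASK << name ## _SHIFT);		\
		(value) |= (((uint64_t)(flag)) << (name ## _SHIFT));	\
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)

int main(void)
{
	uint8_t bitfields = 0;

	SET_FIELD(bitfields, ETH_TX_DATA_3RD_BD_HDR_NBD, 3);
	printf("hdr_nbd = %u\n",	/* prints 3 */
	       (unsigned)GET_FIELD(bitfields, ETH_TX_DATA_3RD_BD_HDR_NBD));
	return 0;
}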
+
+/* Debug print definitions */
+#define DP_ERR(cdev, fmt, ...) \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__) \
+
+#define DP_NOTICE(cdev, fmt, ...) \
+ do { \
+ if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ \
+ } \
+ } while (0)
+
+#define DP_INFO(cdev, fmt, ...) \
+ do { \
+ if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define DP_VERBOSE(cdev, module, fmt, ...) \
+ do { \
+ if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
+ ((cdev)->dp_module & module))) { \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+enum DP_LEVEL {
+ QED_LEVEL_VERBOSE = 0x0,
+ QED_LEVEL_INFO = 0x1,
+ QED_LEVEL_NOTICE = 0x2,
+ QED_LEVEL_ERR = 0x3,
+};
+
+#define QED_LOG_LEVEL_SHIFT (30)
+#define QED_LOG_VERBOSE_MASK (0x3fffffff)
+#define QED_LOG_INFO_MASK (0x40000000)
+#define QED_LOG_NOTICE_MASK (0x80000000)
+
+enum DP_MODULE {
+ QED_MSG_SPQ = 0x10000,
+ QED_MSG_STATS = 0x20000,
+ QED_MSG_DCB = 0x40000,
+ QED_MSG_IOV = 0x80000,
+ QED_MSG_SP = 0x100000,
+ QED_MSG_STORAGE = 0x200000,
+ QED_MSG_CXT = 0x800000,
+ QED_MSG_ILT = 0x2000000,
+ QED_MSG_ROCE = 0x4000000,
+ QED_MSG_DEBUG = 0x8000000,
+ /* to be added...up to 0x8000000 */
+};
+
+struct qed_eth_stats {
+ u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 tpa_coalesced_pkts;
+ u64 tpa_coalesced_events;
+ u64 tpa_aborts_num;
+ u64 tpa_not_coalesced_pkts;
+ u64 tpa_coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_127_byte_packets;
+ u64 rx_255_byte_packets;
+ u64 rx_511_byte_packets;
+ u64 rx_1023_byte_packets;
+ u64 rx_1518_byte_packets;
+ u64 rx_1522_byte_packets;
+ u64 rx_2047_byte_packets;
+ u64 rx_4095_byte_packets;
+ u64 rx_9216_byte_packets;
+ u64 rx_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 rx_mac_bytes;
+ u64 rx_mac_uc_packets;
+ u64 rx_mac_mc_packets;
+ u64 rx_mac_bc_packets;
+ u64 rx_mac_frames_ok;
+ u64 tx_mac_bytes;
+ u64 tx_mac_uc_packets;
+ u64 tx_mac_mc_packets;
+ u64 tx_mac_bc_packets;
+ u64 tx_mac_ctrl_frames;
+};
+
+#define QED_SB_IDX 0x0002
+
+#define RX_PI 0
+#define TX_PI(tc) (RX_PI + 1 + tc)
+
+static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
+{
+ u32 prod = 0;
+ u16 rc = 0;
+
+ prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
+ STATUS_BLOCK_PROD_INDEX_MASK;
+ if (sb_info->sb_ack != prod) {
+ sb_info->sb_ack = prod;
+ rc |= QED_SB_IDX;
+ }
+
+ /* Let SB update */
+ mmiowb();
+ return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ * written to the IGU.
+ *
+ * @param sb_info - This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init
+ * @param int_cmd - Enable/Disable/Nop
+ * @param upd_flg - whether igu consumer should be
+ * updated.
+ */
+static inline void qed_sb_ack(struct qed_sb_info *sb_info,
+ enum igu_int_cmd int_cmd,
+ u8 upd_flg)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+
+	/* Both segments (interrupts & acks) are written to the same address;
+	 * we need to guarantee all commands will be received (in-order) by HW.
+ */
+ mmiowb();
+ barrier();
+}
+
+static inline void __internal_ram_wr(void *p_hwfn,
+ void __iomem *addr,
+ int size,
+ u32 *data)
+
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
+}
+
+static inline void internal_ram_wr(void __iomem *addr,
+ int size,
+ u32 *data)
+{
+ __internal_ram_wr(NULL, addr, size, data);
+}
+
+#endif
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index f426503..2296e6b 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,4 +95,15 @@
return;
}
#endif /* CONFIG_SECCOMP_FILTER */
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+extern long seccomp_get_filter(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
+#else
+static inline long seccomp_get_filter(struct task_struct *task,
+ unsigned long n, void __user *data)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
#endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4398411..24f4dfd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -463,6 +463,15 @@
return delta_us;
}
+static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
+ const struct skb_mstamp *t0)
+{
+ s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
+
+ if (!diff)
+ diff = t1->stamp_us - t0->stamp_us;
+ return diff > 0;
+}
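The comparison above is wrap-safe because the u32 subtraction is
evaluated modulo 2^32 and then interpreted as signed. A standalone
sketch of the same logic (struct and function renamed to avoid clashing
with the kernel definitions):

#include <stdint.h>
#include <stdio.h>

struct mstamp { uint32_t stamp_us, stamp_jiffies; };

static int mstamp_after(const struct mstamp *t1, const struct mstamp *t0)
{
	int32_t diff = (int32_t)(t1->stamp_jiffies - t0->stamp_jiffies);

	if (!diff)
		diff = (int32_t)(t1->stamp_us - t0->stamp_us);
	return diff > 0;
}

int main(void)
{
	struct mstamp t0 = { .stamp_us = 10, .stamp_jiffies = 0xffffffff };
	struct mstamp t1 = { .stamp_us = 10, .stamp_jiffies = 0 };

	printf("%d\n", mstamp_after(&t1, &t0));	/* 1: t1 later despite wrap */
	return 0;
}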
/**
* struct sk_buff - socket buffer
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 86a7eda..c906f45 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -194,6 +194,12 @@
u32 window_clamp; /* Maximal window to advertise */
u32 rcv_ssthresh; /* Current window clamp */
+ /* Information of the most recently (s)acked skb */
+ struct tcp_rack {
+ struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u8 advanced; /* mstamp advanced since last lost marking */
+ u8 reord; /* reordering detected */
+ } rack;
u16 advmss; /* Advertised MSS */
u8 unused;
u8 nonagle : 4,/* Disable Nagle algorithm? */
@@ -217,6 +223,9 @@
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
+ struct rtt_meas {
+ u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
+ } rtt_min[3];
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -280,8 +289,6 @@
int lost_cnt_hint;
u32 retransmit_high; /* L-bits may be on up to this seqno */
- u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
-
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
@@ -385,8 +392,9 @@
static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
- queue->fastopenq.max_qlen = backlog;
+ queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
}
static inline void tcp_saved_syn_free(struct tcp_sock *tp)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 90332a1..48155be 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,6 +5,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1501,13 +1502,26 @@
};
/**
+ * struct cfg80211_sched_scan_plan - scan plan for scheduled scan
+ *
+ * @interval: interval between scheduled scan iterations. In seconds.
+ * @iterations: number of scan iterations in this scan plan. Zero means
+ * infinite loop.
+ * The last scan plan will always have this parameter set to zero,
+ * all other scan plans will have a finite number of iterations.
+ */
+struct cfg80211_sched_scan_plan {
+ u32 interval;
+ u32 iterations;
+};
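A hedged illustration of how a set of plans might look: a few quick
iterations first, then an infinite slow loop (per the rule above, only
the last plan may leave iterations at zero):

static const struct cfg80211_sched_scan_plan my_plans[] = {
	{ .interval = 10,  .iterations = 5 },	/* 5 scans, 10 s apart */
	{ .interval = 300, .iterations = 0 },	/* then every 300 s, forever */
};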
+
+/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
* @scan_width: channel width for scanning
- * @interval: interval between each scheduled scan cycle
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @flags: bit field of flags controlling operation
@@ -1526,6 +1540,9 @@
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @scan_plans: scan plans to be executed in this scheduled scan. Lowest
+ * index must be executed first.
+ * @n_scan_plans: number of scan plans, at least 1.
* @rcu_head: RCU callback used to free the struct
* @owner_nlportid: netlink portid of owner (if this should is a request
* owned by a particular socket)
@@ -1539,7 +1556,6 @@
int n_ssids;
u32 n_channels;
enum nl80211_bss_scan_width scan_width;
- u32 interval;
const u8 *ie;
size_t ie_len;
u32 flags;
@@ -1547,6 +1563,8 @@
int n_match_sets;
s32 min_rssi_thold;
u32 delay;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ int n_scan_plans;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
@@ -1576,6 +1594,26 @@
};
/**
+ * struct cfg80211_inform_bss - BSS inform data
+ * @chan: channel the frame was received on
+ * @scan_width: scan width that was used
+ * @signal: signal strength value, according to the wiphy's
+ * signal type
+ * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
+ * received; should match the time when the frame was actually
+ * received by the device (not just by the host, in case it was
+ * buffered on the device) and be accurate to about 10ms.
+ * If the frame isn't buffered, just passing the return value of
+ * ktime_get_boot_ns() is likely appropriate.
+ */
+struct cfg80211_inform_bss {
+ struct ieee80211_channel *chan;
+ enum nl80211_bss_scan_width scan_width;
+ s32 signal;
+ u64 boottime_ns;
+};
+
+/**
* struct cfg80211_bss_ie_data - BSS entry IE data
* @tsf: TSF contained in the frame that carried these IEs
* @rcu_head: internal use, for freeing
@@ -3056,6 +3094,12 @@
* include fixed IEs like supported rates
* @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
* scans
+ * @max_sched_scan_plans: maximum number of scan plans (scan interval and number
+ * of iterations) for scheduled scan supported by the device.
+ * @max_sched_scan_plan_interval: maximum interval (in seconds) for a
+ * single scan plan supported by the device.
+ * @max_sched_scan_plan_iterations: maximum number of iterations for a single
+ * scan plan supported by the device.
* @coverage_class: current coverage class
* @fw_version: firmware version for ethtool reporting
* @hw_version: hardware version for ethtool reporting
@@ -3163,6 +3207,9 @@
u8 max_match_sets;
u16 max_scan_ie_len;
u16 max_sched_scan_ie_len;
+ u32 max_sched_scan_plans;
+ u32 max_sched_scan_plan_interval;
+ u32 max_sched_scan_plan_iterations;
int n_cipher_suites;
const u32 *cipher_suites;
@@ -3958,14 +4005,11 @@
void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
/**
- * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
- *
+ * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
* @wiphy: the wiphy reporting the BSS
- * @rx_channel: The channel the frame was received on
- * @scan_width: width of the control channel
+ * @data: the BSS metadata
* @mgmt: the management frame (probe response or beacon)
* @len: length of the management frame
- * @signal: the signal strength, type depends on the wiphy's signal_type
* @gfp: context flags
*
* This informs cfg80211 that BSS information was found and
@@ -3975,11 +4019,26 @@
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum nl80211_bss_scan_width scan_width,
struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = scan_width,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
+}
static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
@@ -3987,9 +4046,13 @@
struct ieee80211_mgmt *mgmt, size_t len,
s32 signal, gfp_t gfp)
{
- return cfg80211_inform_bss_width_frame(wiphy, rx_channel,
- NL80211_BSS_CHAN_WIDTH_20,
- mgmt, len, signal, gfp);
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
}
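A hedged driver-side sketch of the new _data() entry point, passing the
CLOCK_BOOTTIME receive timestamp described above (the wrapper name
my_report_beacon is hypothetical):

static void my_report_beacon(struct wiphy *wiphy,
			     struct ieee80211_channel *chan,
			     struct ieee80211_mgmt *mgmt, size_t len,
			     s32 signal)
{
	struct cfg80211_inform_bss data = {
		.chan = chan,
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = signal,
		.boottime_ns = ktime_get_boot_ns(),
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
					     GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(wiphy, bss);
}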
/**
@@ -4006,11 +4069,10 @@
};
/**
- * cfg80211_inform_bss_width - inform cfg80211 of a new BSS
+ * cfg80211_inform_bss_data - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
- * @rx_channel: The channel the frame was received on
- * @scan_width: width of the control channel
+ * @data: the BSS metadata
* @ftype: frame type (if known)
* @bssid: the BSSID of the BSS
* @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
@@ -4018,7 +4080,6 @@
* @beacon_interval: the beacon interval announced by the peer
* @ie: additional IEs sent by the peer
* @ielen: length of the additional IEs
- * @signal: the signal strength, type depends on the wiphy's signal_type
* @gfp: context flags
*
* This informs cfg80211 that BSS information was found and
@@ -4028,13 +4089,32 @@
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_width(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum nl80211_bss_scan_width scan_width,
enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = scan_width,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
+ capability, beacon_interval, ie, ielen,
+ gfp);
+}
static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
@@ -4044,11 +4124,15 @@
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp)
{
- return cfg80211_inform_bss_width(wiphy, rx_channel,
- NL80211_BSS_CHAN_WIDTH_20, ftype,
- bssid, tsf, capability,
- beacon_interval, ie, ielen, signal,
- gfp);
+ struct cfg80211_inform_bss data = {
+ .chan = rx_channel,
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .signal = signal,
+ };
+
+ return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
+ capability, beacon_interval, ie, ielen,
+ gfp);
}
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index e005886..98ccbde 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -198,6 +198,7 @@
}
struct switchdev_trans;
+struct switchdev_obj;
struct switchdev_obj_port_fdb;
struct dsa_switch_driver {
@@ -327,9 +328,9 @@
struct switchdev_trans *trans);
int (*port_fdb_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb);
- int (*port_fdb_getnext)(struct dsa_switch *ds, int port,
- unsigned char *addr, u16 *vid,
- bool *is_static);
+ int (*port_fdb_dump)(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj));
};
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index af9d538..ce00971 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -60,6 +60,38 @@
return tun_dst;
}
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+	struct metadata_dst *new_md;
+	int md_size;
+
+	if (!md_dst)
+		return ERR_PTR(-EINVAL);
+
+	md_size = md_dst->u.tun_info.options_len;
+
+ new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
+ if (!new_md)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+ sizeof(struct ip_tunnel_info) + md_size);
+ skb_dst_drop(skb);
+ dst_hold(&new_md->dst);
+ skb_dst_set(skb, &new_md->dst);
+ return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+ struct metadata_dst *dst;
+
+ dst = tun_dst_unclone(skb);
+ if (IS_ERR(dst))
+ return NULL;
+
+ return &dst->u.tun_info;
+}
+
static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 6361570..481fe1c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -43,7 +43,9 @@
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst);
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req);
u16 net_header_len;
u16 net_frag_header_len;
u16 sockaddr_len;
@@ -272,6 +274,9 @@
struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ struct request_sock *req,
+ bool own_req);
static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 6683ada..de2e3ad 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@
void inet_hashinfo_init(struct inet_hashinfo *h);
-int inet_ehash_insert(struct sock *sk, struct sock *osk);
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+bool inet_ehash_insert(struct sock *sk, struct sock *osk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
void __inet_hash(struct sock *sk, struct sock *osk);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 4ec6fed..4b9dd07 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1241,11 +1241,6 @@
* @flags: configuration flags defined above
*
* @listen_interval: listen interval in units of beacon interval
- * @max_sleep_period: the maximum number of beacon intervals to sleep for
- * before checking the beacon for a TIM bit (managed mode only); this
- * value will be only achievable between DTIM frames, the hardware
- * needs to check for the multicast traffic bit in DTIM beacons.
- * This variable is valid only when the CONF_PS flag is set.
* @ps_dtim_period: The DTIM period of the AP we're connected to, for use
* in power saving. Power saving will not be enabled until a beacon
* has been received and the DTIM period is known.
@@ -1275,7 +1270,6 @@
struct ieee80211_conf {
u32 flags;
int power_level, dynamic_ps_timeout;
- int max_sleep_period;
u16 listen_interval;
u8 ps_dtim_period;
@@ -1683,6 +1677,7 @@
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
@@ -1700,6 +1695,7 @@
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
+ bool mfp;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 4757997..179253f 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -18,7 +18,7 @@
struct mpls_iptunnel_encap {
u32 label[MAX_NEW_LABELS];
- u32 labels;
+ u8 labels;
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index aff6ceb..2f87c1b 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -124,7 +124,8 @@
int (*fill_link_af)(struct sk_buff *skb,
const struct net_device *dev,
u32 ext_filter_mask);
- size_t (*get_link_af_size)(const struct net_device *dev);
+ size_t (*get_link_af_size)(const struct net_device *dev,
+ u32 ext_filter_mask);
int (*validate_link_af)(const struct net_device *dev,
const struct nlattr *attr);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eed94fc..f80e74c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -279,6 +279,7 @@
extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
+extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
@@ -456,7 +457,9 @@
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst);
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
@@ -566,6 +569,7 @@
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
@@ -671,6 +675,12 @@
return dst_metric_locked(dst, RTAX_CC_ALGO);
}
+/* Minimum RTT in usec. ~0 means not available. */
+static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
+{
+ return tp->rtt_min[0].rtt;
+}
+
/* Compute the actual receive window we are currently advertising.
* Rcv_nxt can be after the window if our peer push more data
* than the offered window.
@@ -1743,6 +1753,19 @@
void tcp_v4_init(void);
void tcp_init(void);
+/* tcp_recovery.c */
+
+/* Flags to enable various loss recovery features. See below */
+extern int sysctl_tcp_recovery;
+
+/* Use TCP RACK to detect (some) tail and retransmit losses */
+#define TCP_RACK_LOST_RETRANS 0x1
+
+extern int tcp_rack_mark_lost(struct sock *sk);
+
+extern void tcp_rack_advance(struct tcp_sock *tp,
+ const struct skb_mstamp *xmit_time, u8 sacked);
+
/*
* Save and compile IPv4 options, return a pointer to it
*/
diff --git a/include/net/tso.h b/include/net/tso.h
index 47e5444..b7be852 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -8,6 +8,7 @@
void *data;
size_t size;
u16 ip_id;
+ bool ipv6;
u32 tcp_seq;
};
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 564f1f0..2e03242 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -287,6 +287,17 @@
* Return: realm if != 0
*/
BPF_FUNC_get_route_realm,
+
+ /**
+ * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
+ * @ctx: struct pt_regs*
+ * @map: pointer to perf_event_array map
+ * @index: index of event in the map
+ * @data: data on stack to be output as raw data
+ * @size: size of data
+ * Return: 0 on success
+ */
+ BPF_FUNC_perf_event_output,
__BPF_FUNC_MAX_ID,
};
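A hedged sketch of a program using the new helper, following the
bpf_helpers.h conventions from samples/bpf (the map name, section names
and sample layout are placeholders; the events stored in the map must
be opened as PERF_TYPE_SOFTWARE / PERF_COUNT_SW_BPF_OUTPUT):

#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") my_map = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 32,	/* >= number of possible CPUs */
};

SEC("kprobe/sys_write")
int my_prog(struct pt_regs *ctx)
{
	struct { u64 pid; u64 cookie; } data = {
		.pid = bpf_get_current_pid_tgid(),
		.cookie = 0x12345678,
	};

	/* index 0 selects the perf event stored in slot 0 of the map */
	bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
	return 0;
}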
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index e3b6217..5ad5737 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -461,6 +461,7 @@
IFLA_GENEVE_TOS,
IFLA_GENEVE_PORT, /* destination port */
IFLA_GENEVE_COLLECT_METADATA,
+ IFLA_GENEVE_REMOTE6,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -550,6 +551,7 @@
* on/off switch
*/
IFLA_VF_STATS, /* network device statistics */
+ IFLA_VF_TRUST, /* Trust VF */
__IFLA_VF_MAX,
};
@@ -611,6 +613,11 @@
#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
+struct ifla_vf_trust {
+ __u32 vf;
+ __u32 setting;
+};
+
/* VF ports management section
*
* Nested layout of set/get msg is:
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c0ab6b0..1f0b4cf 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -10,6 +10,7 @@
* Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
+ * Copyright 2015 Intel Deutschland GmbH
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -328,7 +329,15 @@
* partial scan results may be available
*
* @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain
- * intervals, as specified by %NL80211_ATTR_SCHED_SCAN_INTERVAL.
+ * intervals and certain number of cycles, as specified by
+ * %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is
+ * not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified,
+ * scheduled scan will run in an infinite loop with the specified interval.
+ *	These attributes are mutually exclusive,
+ * i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if
+ * NL80211_ATTR_SCHED_SCAN_PLANS is defined.
+ * If for some reason scheduled scan is aborted by the driver, all scan
+ * plans are canceled (including scan plans that did not start yet).
* Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS)
* are passed, they are used in the probe requests. For
* broadcast, a broadcast SSID must be passed (ie. an empty
@@ -1761,6 +1770,19 @@
* @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
* is operating in an indoor environment.
*
+ * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS: maximum number of scan plans for
+ * scheduled scan supported by the device (u32), a wiphy attribute.
+ * @NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL: maximum interval (in seconds) for
+ * a scan plan (u32), a wiphy attribute.
+ * @NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS: maximum number of iterations in
+ * a scan plan (u32), a wiphy attribute.
+ * @NL80211_ATTR_SCHED_SCAN_PLANS: a list of scan plans for scheduled scan.
+ * Each scan plan defines the number of scan iterations and the interval
+ * between scans. The last scan plan will always run infinitely,
+ * thus it must not specify the number of iterations, only the interval
+ * between scans. The scan plans are executed sequentially.
+ * Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2130,6 +2152,11 @@
NL80211_ATTR_REG_INDOOR,
+ NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
+ NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
+ NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
+ NL80211_ATTR_SCHED_SCAN_PLANS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3364,6 +3391,9 @@
* (not present if no beacon frame has been received yet)
* @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
* @NL80211_BSS_TSF is known to be from a probe response (flag attribute)
+ * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry
+ * was last updated by a received frame. The value is expected to be
+ * accurate to about 10ms. (u64, nanoseconds)
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3383,6 +3413,7 @@
NL80211_BSS_CHAN_WIDTH,
NL80211_BSS_BEACON_TSF,
NL80211_BSS_PRESP_DATA,
+ NL80211_BSS_LAST_SEEN_BOOTTIME,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -4589,4 +4620,28 @@
NL80211_TDLS_PEER_WMM = 1<<2,
};
+/**
+ * enum nl80211_sched_scan_plan - scanning plan for scheduled scan
+ * @__NL80211_SCHED_SCAN_PLAN_INVALID: attribute number 0 is reserved
+ * @NL80211_SCHED_SCAN_PLAN_INTERVAL: interval between scan iterations. In
+ * seconds (u32).
+ * @NL80211_SCHED_SCAN_PLAN_ITERATIONS: number of scan iterations in this
+ * scan plan (u32). The last scan plan must not specify this attribute
+ * because it will run infinitely. A value of zero is invalid as it will
+ * make the scan plan meaningless.
+ * @NL80211_SCHED_SCAN_PLAN_MAX: highest scheduled scan plan attribute number
+ * currently defined
+ * @__NL80211_SCHED_SCAN_PLAN_AFTER_LAST: internal use
+ */
+enum nl80211_sched_scan_plan {
+ __NL80211_SCHED_SCAN_PLAN_INVALID,
+ NL80211_SCHED_SCAN_PLAN_INTERVAL,
+ NL80211_SCHED_SCAN_PLAN_ITERATIONS,
+
+ /* keep last */
+ __NL80211_SCHED_SCAN_PLAN_AFTER_LAST,
+ NL80211_SCHED_SCAN_PLAN_MAX =
+ __NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index fef125e..28ccedd 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -622,7 +622,8 @@
* enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
* @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
* table. This allows future packets for the same connection to be identified
- * as 'established' or 'related'.
+ * as 'established' or 'related'. The flow key for the current packet will
+ * retain the pre-commit connection state.
* @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
* @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
* mask, the corresponding bit in the value is copied to the connection
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 2881145..d3c4176 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -110,6 +110,7 @@
PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_DUMMY = 9,
+ PERF_COUNT_SW_BPF_OUTPUT = 10,
PERF_COUNT_SW_MAX, /* non-ABI */
};
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index a7a6979..fb81065 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -64,6 +64,8 @@
#define PTRACE_GETSIGMASK 0x420a
#define PTRACE_SETSIGMASK 0x420b
+#define PTRACE_SECCOMP_GET_FILTER 0x420c
+
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index f2d9e69..3f4c99e 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -292,14 +292,23 @@
attr = perf_event_attrs(event);
if (IS_ERR(attr))
- return (void *)attr;
+ goto err;
- if (attr->type != PERF_TYPE_RAW &&
- attr->type != PERF_TYPE_HARDWARE) {
- perf_event_release_kernel(event);
- return ERR_PTR(-EINVAL);
- }
- return event;
+ if (attr->inherit)
+ goto err;
+
+ if (attr->type == PERF_TYPE_RAW)
+ return event;
+
+ if (attr->type == PERF_TYPE_HARDWARE)
+ return event;
+
+ if (attr->type == PERF_TYPE_SOFTWARE &&
+ attr->config == PERF_COUNT_SW_BPF_OUTPUT)
+ return event;
+err:
+ perf_event_release_kernel(event);
+ return ERR_PTR(-EINVAL);
}
static void perf_event_fd_array_put_ptr(void *ptr)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1d6b97b..b56cf51 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -245,6 +245,7 @@
} func_limit[] = {
{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+ {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
};
static void print_verifier_state(struct verifier_env *env)
@@ -910,7 +911,7 @@
* don't allow any other map type to be passed into
* the special func;
*/
- if (bool_map != bool_func)
+ if (bool_func && bool_map != bool_func)
return -EINVAL;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b11756f..64754bf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5286,9 +5286,15 @@
if (sample_type & PERF_SAMPLE_RAW) {
if (data->raw) {
- perf_output_put(handle, data->raw->size);
- __output_copy(handle, data->raw->data,
- data->raw->size);
+ u32 raw_size = data->raw->size;
+ u32 real_size = round_up(raw_size + sizeof(u32),
+ sizeof(u64)) - sizeof(u32);
+ u64 zero = 0;
+
+ perf_output_put(handle, real_size);
+ __output_copy(handle, data->raw->data, raw_size);
+ if (real_size - raw_size)
+ __output_copy(handle, &zero, real_size - raw_size);
} else {
struct {
u32 size;
@@ -5420,8 +5426,7 @@
else
size += sizeof(u32);
- WARN_ON_ONCE(size & (sizeof(u64)-1));
- header->size += size;
+ header->size += round_up(size, sizeof(u64));
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
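
/* Illustrative sketch (not part of the patch): the size/padding arithmetic
 * used above. The u32 size field plus the raw payload is rounded up to the
 * next u64 boundary and the pad bytes are emitted as zeroes, so following
 * sample fields stay 8-byte aligned.
 */
#include <stdint.h>
#include <stdio.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint32_t raw_size = 13;	/* e.g. 13 bytes of raw data */
	uint32_t real_size = round_up(raw_size + sizeof(uint32_t),
				      sizeof(uint64_t)) - sizeof(uint32_t);

	/* 13 + 4 = 17 -> rounds to 24 -> 20 reported bytes, 7 of them
	 * zero padding; 4 + 20 = 24 keeps the record u64-aligned.
	 */
	printf("raw=%u real=%u pad=%u\n", raw_size, real_size,
	       real_size - raw_size);
	return 0;
}
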
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 787320d..b760bae 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -1016,6 +1016,11 @@
break;
}
#endif
+
+ case PTRACE_SECCOMP_GET_FILTER:
+ ret = seccomp_get_filter(child, addr, datavp);
+ break;
+
default:
break;
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 06858a7..580ac2d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -347,6 +347,7 @@
{
struct seccomp_filter *sfilter;
int ret;
+ const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE);
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
@@ -370,7 +371,7 @@
return ERR_PTR(-ENOMEM);
ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
- seccomp_check_filter, false);
+ seccomp_check_filter, save_orig);
if (ret < 0) {
kfree(sfilter);
return ERR_PTR(ret);
@@ -867,3 +868,76 @@
/* prctl interface doesn't have flags, so they are always zero. */
return do_seccomp(op, 0, uargs);
}
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+ void __user *data)
+{
+ struct seccomp_filter *filter;
+ struct sock_fprog_kern *fprog;
+ long ret;
+ unsigned long count = 0;
+
+ if (!capable(CAP_SYS_ADMIN) ||
+ current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+ return -EACCES;
+ }
+
+ spin_lock_irq(&task->sighand->siglock);
+ if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ filter = task->seccomp.filter;
+ while (filter) {
+ filter = filter->prev;
+ count++;
+ }
+
+ if (filter_off >= count) {
+ ret = -ENOENT;
+ goto out;
+ }
+ count -= filter_off;
+
+ filter = task->seccomp.filter;
+ while (filter && count > 1) {
+ filter = filter->prev;
+ count--;
+ }
+
+ if (WARN_ON(count != 1 || !filter)) {
+ /* The filter tree shouldn't shrink while we're using it. */
+ ret = -ENOENT;
+ goto out;
+ }
+
+ fprog = filter->prog->orig_prog;
+ if (!fprog) {
+		/* This must be a new non-cBPF filter, since we save every
+		 * cBPF filter's orig_prog above when
+ * CONFIG_CHECKPOINT_RESTORE is enabled.
+ */
+ ret = -EMEDIUMTYPE;
+ goto out;
+ }
+
+ ret = fprog->len;
+ if (!data)
+ goto out;
+
+ get_seccomp_filter(task);
+ spin_unlock_irq(&task->sighand->siglock);
+
+ if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+ ret = -EFAULT;
+
+ put_seccomp_filter(task);
+ return ret;
+
+out:
+ spin_unlock_irq(&task->sighand->siglock);
+ return ret;
+}
+#endif
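
/* Illustrative user-space sketch (dump_filter() is a hypothetical helper,
 * not part of this patch): filter_off indexes the task's filter stack; a
 * NULL data pointer returns the instruction count, a second call copies
 * the classic-BPF program out. The tracer needs CAP_SYS_ADMIN and must
 * not be seccomped itself.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <linux/filter.h>

#ifndef PTRACE_SECCOMP_GET_FILTER
#define PTRACE_SECCOMP_GET_FILTER 0x420c
#endif

static long dump_filter(pid_t pid, unsigned long filter_off)
{
	long cnt = ptrace(PTRACE_SECCOMP_GET_FILTER, pid,
			  (void *)filter_off, NULL);
	struct sock_filter *insns;

	if (cnt < 0)	/* errno: EINVAL, ENOENT, EMEDIUMTYPE, EACCES */
		return cnt;

	insns = calloc(cnt, sizeof(*insns));
	if (!insns)
		return -1;
	if (ptrace(PTRACE_SECCOMP_GET_FILTER, pid,
		   (void *)filter_off, insns) < 0) {
		free(insns);
		return -1;
	}
	printf("filter %lu: %ld cBPF instructions\n", filter_off, cnt);
	free(insns);
	return cnt;
}
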
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0fe96c7..4228fd36 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -199,6 +199,11 @@
if (!event)
return -ENOENT;
+ /* make sure event is local and doesn't have pmu::count */
+ if (event->oncpu != smp_processor_id() ||
+ event->pmu->count)
+ return -EINVAL;
+
/*
* we don't know if the function is run successfully by the
* return value. It can be judged in other places, such as
@@ -207,14 +212,58 @@
return perf_event_read_local(event);
}
-const struct bpf_func_proto bpf_perf_event_read_proto = {
+static const struct bpf_func_proto bpf_perf_event_read_proto = {
.func = bpf_perf_event_read,
- .gpl_only = false,
+ .gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_ANYTHING,
};
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+ struct pt_regs *regs = (struct pt_regs *) (long) r1;
+ struct bpf_map *map = (struct bpf_map *) (long) r2;
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ void *data = (void *) (long) r4;
+ struct perf_sample_data sample_data;
+ struct perf_event *event;
+ struct perf_raw_record raw = {
+ .size = size,
+ .data = data,
+ };
+
+ if (unlikely(index >= array->map.max_entries))
+ return -E2BIG;
+
+ event = (struct perf_event *)array->ptrs[index];
+ if (unlikely(!event))
+ return -ENOENT;
+
+ if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
+ event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
+ return -EINVAL;
+
+ if (unlikely(event->oncpu != smp_processor_id()))
+ return -EOPNOTSUPP;
+
+ perf_sample_data_init(&sample_data, 0, 0);
+ sample_data.raw = &raw;
+ perf_event_output(event, &sample_data, regs);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto = {
+ .func = bpf_perf_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_STACK,
+ .arg5_type = ARG_CONST_STACK_SIZE,
+};
+
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -242,6 +291,8 @@
return &bpf_get_smp_processor_id_proto;
case BPF_FUNC_perf_event_read:
return &bpf_perf_event_read_proto;
+ case BPF_FUNC_perf_event_output:
+ return &bpf_perf_event_output_proto;
default:
return NULL;
}
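
/* Illustrative BPF program sketch (assumption: the helper wrappers from
 * samples/bpf's bpf_helpers.h). Records are pushed to user space through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots hold PERF_COUNT_SW_BPF_OUTPUT
 * software events -- the only event type the map-fd check above accepts.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") my_map = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 2,
};

SEC("kprobe/sys_write")
int bpf_prog(struct pt_regs *ctx)
{
	struct {
		u64 pid;
		u64 cookie;
	} data = {
		.pid = bpf_get_current_pid_tgid(),
		.cookie = 0x12345678,
	};

	/* index 0: user space opened the event on CPU 0 and pinned
	 * itself there, satisfying the event->oncpu check above.
	 */
	bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
	return 0;
}
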
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c88bd8e..a642bb8 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -495,7 +495,9 @@
static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
struct net_bridge_port *source,
const unsigned char *addr,
- __u16 vid)
+ __u16 vid,
+ unsigned char is_local,
+ unsigned char is_static)
{
struct net_bridge_fdb_entry *fdb;
@@ -504,8 +506,8 @@
memcpy(fdb->addr.addr, addr, ETH_ALEN);
fdb->dst = source;
fdb->vlan_id = vid;
- fdb->is_local = 0;
- fdb->is_static = 0;
+ fdb->is_local = is_local;
+ fdb->is_static = is_static;
fdb->added_by_user = 0;
fdb->added_by_external_learn = 0;
fdb->updated = fdb->used = jiffies;
@@ -536,11 +538,10 @@
fdb_delete(br, fdb);
}
- fdb = fdb_create(head, source, addr, vid);
+ fdb = fdb_create(head, source, addr, vid, 1, 1);
if (!fdb)
return -ENOMEM;
- fdb->is_local = fdb->is_static = 1;
fdb_add_hw_addr(br, addr);
fdb_notify(br, fdb, RTM_NEWNEIGH);
return 0;
@@ -597,7 +598,7 @@
} else {
spin_lock(&br->hash_lock);
if (likely(!fdb_find(head, addr, vid))) {
- fdb = fdb_create(head, source, addr, vid);
+ fdb = fdb_create(head, source, addr, vid, 0, 0);
if (fdb) {
if (unlikely(added_by_user))
fdb->added_by_user = 1;
@@ -774,7 +775,7 @@
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- fdb = fdb_create(head, source, addr, vid);
+ fdb = fdb_create(head, source, addr, vid, 0, 0);
if (!fdb)
return -ENOMEM;
@@ -1099,7 +1100,7 @@
head = &br->hash[br_mac_hash(addr, vid)];
fdb = fdb_find(head, addr, vid);
if (!fdb) {
- fdb = fdb_create(head, p, addr, vid);
+ fdb = fdb_create(head, p, addr, vid, 0, 0);
if (!fdb) {
err = -ENOMEM;
goto err_unlock;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a9d424e..fcdb86d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -141,7 +141,7 @@
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
- if (should_deliver(to, skb)) {
+ if (to && should_deliver(to, skb)) {
if (skb0)
deliver_clone(to, skb, __br_forward);
else
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 94b4de8..40197ff 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1214,29 +1214,10 @@
return 0;
}
-static size_t br_get_link_af_size(const struct net_device *dev)
-{
- struct net_bridge_port *p;
- struct net_bridge *br;
- int num_vlans = 0;
-
- if (br_port_exists(dev)) {
- p = br_port_get_rtnl(dev);
- num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p),
- RTEXT_FILTER_BRVLAN);
- } else if (dev->priv_flags & IFF_EBRIDGE) {
- br = netdev_priv(dev);
- num_vlans = br_get_num_vlan_infos(br_vlan_group(br),
- RTEXT_FILTER_BRVLAN);
- }
-
- /* Each VLAN is returned in bridge_vlan_info along with flags */
- return num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
-}
static struct rtnl_af_ops br_af_ops __read_mostly = {
.family = AF_BRIDGE,
- .get_link_af_size = br_get_link_af_size,
+ .get_link_af_size = br_get_link_af_size_filtered,
};
struct rtnl_link_ops br_link_ops __read_mostly = {
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 4ca449a..fa53d7a 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -15,6 +15,7 @@
#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
+#include <net/switchdev.h>
#include "br_private.h"
#include "br_private_stp.h"
@@ -35,11 +36,22 @@
/* called under bridge lock */
void br_init_port(struct net_bridge_port *p)
{
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+ .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
+ .u.ageing_time = p->br->ageing_time,
+ };
+ int err;
+
p->port_id = br_make_port_id(p->priority, p->port_no);
br_become_designated_port(p);
br_set_state(p, BR_STATE_BLOCKING);
p->topology_change_ack = 0;
p->config_pending = 0;
+
+ err = switchdev_port_attr_set(p->dev, &attr);
+ if (err)
+ netdev_err(p->dev, "failed to set HW ageing time\n");
}
/* called under bridge lock */
diff --git a/net/core/dev.c b/net/core/dev.c
index 1225b4b..13f49f8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -99,6 +99,7 @@
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
@@ -682,6 +683,32 @@
EXPORT_SYMBOL(dev_get_iflink);
/**
+ * dev_fill_metadata_dst - Retrieve tunnel egress information.
+ * @dev: targeted interface
+ * @skb: The packet.
+ *
+ * For better visibility of tunnel traffic, OVS needs to retrieve the
+ * egress tunnel information for a packet. This API lets the caller
+ * obtain that information.
+ */
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info;
+
+ if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
+ return -EINVAL;
+
+ info = skb_tunnel_info_unclone(skb);
+ if (!info)
+ return -ENOMEM;
+ if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
+ return -EINVAL;
+
+ return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
+}
+EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
+
+/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
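
/* Illustrative in-kernel sketch (hypothetical caller, not part of the
 * patch): how a user such as OVS can learn the egress tunnel parameters
 * for a packet headed out a metadata-mode tunnel device.
 */
static int example_probe_tunnel_egress(struct net_device *tunnel_dev,
				       struct sk_buff *skb)
{
	struct ip_tunnel_info *info;
	int err;

	err = dev_fill_metadata_dst(tunnel_dev, skb);
	if (err)
		return err;	/* no ndo, no tunnel info, or not TX */

	info = skb_tunnel_info(skb);
	/* info->key now holds egress data, e.g. key.u.ipv4.src as
	 * filled in by the device's route lookup.
	 */
	return 0;
}
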
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2477595..504bd17 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -497,7 +497,8 @@
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
-static size_t rtnl_link_get_af_size(const struct net_device *dev)
+static size_t rtnl_link_get_af_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
struct rtnl_af_ops *af_ops;
size_t size;
@@ -509,7 +510,7 @@
if (af_ops->get_link_af_size) {
/* AF_* + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
- af_ops->get_link_af_size(dev);
+ af_ops->get_link_af_size(dev, ext_filter_mask);
}
}
@@ -837,7 +838,8 @@
/* IFLA_VF_STATS_BROADCAST */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */
- nla_total_size(sizeof(__u64)));
+ nla_total_size(sizeof(__u64)) +
+ nla_total_size(sizeof(struct ifla_vf_trust)));
return size;
} else
return 0;
@@ -900,7 +902,7 @@
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
- + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ nla_total_size(1); /* IFLA_PROTO_DOWN */
@@ -1160,6 +1162,7 @@
struct ifla_vf_link_state vf_linkstate;
struct ifla_vf_rss_query_en vf_rss_query_en;
struct ifla_vf_stats vf_stats;
+ struct ifla_vf_trust vf_trust;
/*
* Not all SR-IOV capable drivers support the
@@ -1169,6 +1172,7 @@
*/
ivi.spoofchk = -1;
ivi.rss_query_en = -1;
+ ivi.trusted = -1;
memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
@@ -1182,7 +1186,8 @@
vf_tx_rate.vf =
vf_spoofchk.vf =
vf_linkstate.vf =
- vf_rss_query_en.vf = ivi.vf;
+ vf_rss_query_en.vf =
+ vf_trust.vf = ivi.vf;
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
@@ -1193,6 +1198,7 @@
vf_spoofchk.setting = ivi.spoofchk;
vf_linkstate.link_state = ivi.linkstate;
vf_rss_query_en.setting = ivi.rss_query_en;
+ vf_trust.setting = ivi.trusted;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
@@ -1210,7 +1216,9 @@
&vf_linkstate) ||
nla_put(skb, IFLA_VF_RSS_QUERY_EN,
sizeof(vf_rss_query_en),
- &vf_rss_query_en))
+ &vf_rss_query_en) ||
+ nla_put(skb, IFLA_VF_TRUST,
+ sizeof(vf_trust), &vf_trust))
goto nla_put_failure;
memset(&vf_stats, 0, sizeof(vf_stats));
if (dev->netdev_ops->ndo_get_vf_stats)
@@ -1347,6 +1355,7 @@
[IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
[IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
[IFLA_VF_STATS] = { .type = NLA_NESTED },
+ [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
};
static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
@@ -1586,6 +1595,16 @@
return err;
}
+ if (tb[IFLA_VF_TRUST]) {
+ struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_trust)
+ err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
+ if (err < 0)
+ return err;
+ }
+
return err;
}
@@ -3443,4 +3462,3 @@
rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
}
-
diff --git a/net/core/sock.c b/net/core/sock.c
index dcc7d62..0ef30aa 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -422,13 +422,25 @@
}
}
+static bool sock_needs_netstamp(const struct sock *sk)
+{
+ switch (sk->sk_family) {
+ case AF_UNSPEC:
+ case AF_UNIX:
+ return false;
+ default:
+ return true;
+ }
+}
+
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
if (sk->sk_flags & flags) {
sk->sk_flags &= ~flags;
- if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
+ if (sock_needs_netstamp(sk) &&
+ !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
net_disable_timestamp();
}
}
@@ -1582,7 +1594,8 @@
if (newsk->sk_prot->sockets_allocated)
sk_sockets_allocated_inc(newsk);
- if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+ if (sock_needs_netstamp(sk) &&
+ newsk->sk_flags & SK_FLAGS_TIMESTAMP)
net_enable_timestamp();
}
out:
@@ -2510,7 +2523,8 @@
* time stamping, but time stamping might have been on
* already because of the other one
*/
- if (!(previous_flags & SK_FLAGS_TIMESTAMP))
+ if (sock_needs_netstamp(sk) &&
+ !(previous_flags & SK_FLAGS_TIMESTAMP))
net_enable_timestamp();
}
}
diff --git a/net/core/tso.c b/net/core/tso.c
index 630b30b..5dca7ce 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -1,4 +1,5 @@
#include <linux/export.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tso.h>
#include <asm/unaligned.h>
@@ -14,18 +15,24 @@
void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
int size, bool is_last)
{
- struct iphdr *iph;
struct tcphdr *tcph;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int mac_hdr_len = skb_network_offset(skb);
memcpy(hdr, skb->data, hdr_len);
- iph = (struct iphdr *)(hdr + mac_hdr_len);
- iph->id = htons(tso->ip_id);
- iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+ if (!tso->ipv6) {
+ struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->id = htons(tso->ip_id);
+ iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+ tso->ip_id++;
+ } else {
+ struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->payload_len = htons(size + tcp_hdrlen(skb));
+ }
tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
put_unaligned_be32(tso->tcp_seq, &tcph->seq);
- tso->ip_id++;
if (!is_last) {
/* Clear all special flags for not last packet */
@@ -61,6 +68,7 @@
tso->ip_id = ntohs(ip_hdr(skb)->id);
tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
tso->next_frag_idx = 0;
+ tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
/* Build first data */
tso->size = skb_headlen(skb) - hdr_len;
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 923f5a1..b0e28d2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -278,7 +278,9 @@
struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst);
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req);
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 59bc180..5684e14 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -393,7 +393,9 @@
struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
@@ -426,7 +428,7 @@
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
- __inet_hash_nolisten(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d9cc731f..ef4e48c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -380,7 +380,9 @@
static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *newnp;
@@ -393,7 +395,8 @@
/*
* v6 mapped
*/
- newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
+ newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
+ req_unhash, own_req);
if (newsk == NULL)
return NULL;
@@ -511,7 +514,7 @@
dccp_done(newsk);
goto out;
}
- __inet_hash(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index d10aace..1994f8a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -143,6 +143,7 @@
{
struct sock *child = NULL;
struct dccp_request_sock *dreq = dccp_rsk(req);
+ bool own_req;
/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
@@ -182,14 +183,13 @@
if (dccp_parse_options(sk, dreq, skb))
goto drop;
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
- if (child == NULL)
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ req, &own_req);
+ if (!child)
goto listen_overflow;
- inet_csk_reqsk_queue_drop(sk, req);
- inet_csk_reqsk_queue_add(sk, req, child);
-out:
- return child;
+ return inet_csk_complete_hashdance(sk, child, req, own_req);
+
listen_overflow:
dccp_pr_debug("listen_overflow!\n");
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
@@ -198,7 +198,7 @@
req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req);
- goto out;
+ return NULL;
}
EXPORT_SYMBOL_GPL(dccp_check_req);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index b0b8da0..481754e 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -378,31 +378,11 @@
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- unsigned char addr[ETH_ALEN] = { 0 };
- u16 vid = 0;
- int ret;
- if (!ds->drv->port_fdb_getnext)
- return -EOPNOTSUPP;
+ if (ds->drv->port_fdb_dump)
+ return ds->drv->port_fdb_dump(ds, p->port, fdb, cb);
- for (;;) {
- bool is_static;
-
- ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
- &is_static);
- if (ret < 0)
- break;
-
- ether_addr_copy(fdb->addr, addr);
- fdb->vid = vid;
- fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
- ret = cb(&fdb->obj);
- if (ret < 0)
- break;
- }
-
- return ret == -ENOENT ? 0 : ret;
+ return -EOPNOTSUPP;
}
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 89aacb6..c29809f 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,6 +8,7 @@
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
+ tcp_recovery.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7350084..cebd9d3 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1644,7 +1644,8 @@
rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
-static size_t inet_get_link_af_size(const struct net_device *dev)
+static size_t inet_get_link_af_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
@@ -2398,4 +2399,3 @@
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
inet_netconf_dump_devconf, NULL);
}
-
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d7c2bb0..e786873 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -867,9 +867,10 @@
if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
(prefix != addr || ifa->ifa_prefixlen < 32)) {
- fib_magic(RTM_NEWROUTE,
- dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
- prefix, ifa->ifa_prefixlen, prim);
+ if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+ fib_magic(RTM_NEWROUTE,
+ dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+ prefix, ifa->ifa_prefixlen, prim);
/* Add network specific broadcasts, when it takes a sense */
if (ifa->ifa_prefixlen < 31) {
@@ -914,9 +915,10 @@
}
} else if (!ipv4_is_zeronet(any) &&
(any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
- fib_magic(RTM_DELROUTE,
- dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
- any, ifa->ifa_prefixlen, prim);
+ if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+ fib_magic(RTM_DELROUTE,
+ dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+ any, ifa->ifa_prefixlen, prim);
subnet = 1;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8430bc8..1feb15f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -523,15 +523,15 @@
struct request_sock *req)
{
struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
- spinlock_t *lock;
- bool found;
+ bool found = false;
- lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
+ if (sk_hashed(req_to_sk(req))) {
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
- spin_lock(lock);
- found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
- spin_unlock(lock);
-
+ spin_lock(lock);
+ found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
+ spin_unlock(lock);
+ }
if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
@@ -811,6 +811,25 @@
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ struct request_sock *req, bool own_req)
+{
+ if (own_req) {
+ inet_csk_reqsk_queue_drop(sk, req);
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ inet_csk_reqsk_queue_add(sk, req, child);
+ /* Warning: caller must not call reqsk_put(req);
+ * child stole last reference on it.
+ */
+ return child;
+ }
+ /* Too bad, another child took ownership of the request, undo. */
+ bh_unlock_sock(child);
+ sock_put(child);
+ return NULL;
+}
+EXPORT_SYMBOL(inet_csk_complete_hashdance);
+
/*
* This routine closes sockets which have been at least partially
* opened, but not yet accepted.
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 958728a..ccc5980 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -407,13 +407,13 @@
/* insert a socket into ehash, and eventually remove another one
* (The another one can be a SYN_RECV or TIMEWAIT
*/
-int inet_ehash_insert(struct sock *sk, struct sock *osk)
+bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
struct inet_ehash_bucket *head;
spinlock_t *lock;
- int ret = 0;
+ bool ret = true;
WARN_ON_ONCE(!sk_unhashed(sk));
@@ -423,30 +423,41 @@
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock(lock);
- __sk_nulls_add_node_rcu(sk, list);
if (osk) {
- WARN_ON(sk->sk_hash != osk->sk_hash);
- sk_nulls_del_node_init_rcu(osk);
+ WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+ ret = sk_nulls_del_node_init_rcu(osk);
}
+ if (ret)
+ __sk_nulls_add_node_rcu(sk, list);
spin_unlock(lock);
return ret;
}
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
- inet_ehash_insert(sk, osk);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ bool ok = inet_ehash_insert(sk, osk);
+
+ if (ok) {
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ } else {
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+ sk->sk_state = TCP_CLOSE;
+ sock_set_flag(sk, SOCK_DEAD);
+ inet_csk_destroy_sock(sk);
+ }
+ return ok;
}
-EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
+EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
void __inet_hash(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_listen_hashbucket *ilb;
- if (sk->sk_state != TCP_LISTEN)
- return __inet_hash_nolisten(sk, osk);
-
+ if (sk->sk_state != TCP_LISTEN) {
+ inet_ehash_nolisten(sk, osk);
+ return;
+ }
WARN_ON(!sk_unhashed(sk));
ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -567,7 +578,7 @@
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- __inet_hash_nolisten(sk, (struct sock *)tw);
+ inet_ehash_nolisten(sk, (struct sock *)tw);
}
if (tw)
inet_twsk_bind_unhash(tw, hinfo);
@@ -584,7 +595,7 @@
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- __inet_hash_nolisten(sk, NULL);
+ inet_ehash_nolisten(sk, NULL);
spin_unlock_bh(&head->lock);
return 0;
} else {
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index bd0679d..6145214 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -498,10 +498,26 @@
csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
+static struct rtable *gre_get_rt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi4 *fl,
+ const struct ip_tunnel_key *key)
+{
+ struct net *net = dev_net(dev);
+
+ memset(fl, 0, sizeof(*fl));
+ fl->daddr = key->u.ipv4.dst;
+ fl->saddr = key->u.ipv4.src;
+ fl->flowi4_tos = RT_TOS(key->tos);
+ fl->flowi4_mark = skb->mark;
+ fl->flowi4_proto = IPPROTO_GRE;
+
+ return ip_route_output_key(net, fl);
+}
+
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel_info *tun_info;
- struct net *net = dev_net(dev);
const struct ip_tunnel_key *key;
struct flowi4 fl;
struct rtable *rt;
@@ -516,14 +532,7 @@
goto err_free_skb;
key = &tun_info->key;
- memset(&fl, 0, sizeof(fl));
- fl.daddr = key->u.ipv4.dst;
- fl.saddr = key->u.ipv4.src;
- fl.flowi4_tos = RT_TOS(key->tos);
- fl.flowi4_mark = skb->mark;
- fl.flowi4_proto = IPPROTO_GRE;
-
- rt = ip_route_output_key(net, &fl);
+ rt = gre_get_rt(skb, dev, &fl, key);
if (IS_ERR(rt))
goto err_free_skb;
@@ -566,6 +575,24 @@
dev->stats.tx_dropped++;
}
+static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ if (ip_tunnel_info_af(info) != AF_INET)
+ return -EINVAL;
+
+ rt = gre_get_rt(skb, dev, &fl4, &info->key);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = fl4.saddr;
+ return 0;
+}
+
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1023,6 +1050,7 @@
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 690d27d..a355841 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -75,6 +75,7 @@
config NF_DUP_IPV4
tristate "Netfilter IPv4 packet duplication to alternate destination"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
help
This option enables the nf_dup_ipv4 core, which duplicates an IPv4
packet to be rerouted to another destination.
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 74dd667..78cc64e 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -60,9 +60,7 @@
if (FIB_RES_DEV(res) == dev)
dev_match = true;
#endif
- if (dev_match || flags & XT_RPFILTER_LOOSE)
- return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
- return dev_match;
+ return dev_match || flags & XT_RPFILTER_LOOSE;
}
static bool rpfilter_is_local(const struct sk_buff *skb)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 4c0892b..4cbe9f0 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -221,8 +221,10 @@
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
+ bool own_req;
- child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
+ child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
+ NULL, &own_req);
if (child) {
atomic_set(&req->rsk_refcnt, 1);
sock_rps_save_rxhash(child, skb);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 894da3a..25300c5 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -496,6 +496,13 @@
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_recovery",
+ .data = &sysctl_tcp_recovery,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_reordering",
.data = &sysctl_tcp_reordering,
.maxlen = sizeof(int),
@@ -577,6 +584,13 @@
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_min_rtt_wlen",
+ .data = &sysctl_tcp_min_rtt_wlen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "tcp_low_latency",
.data = &sysctl_tcp_low_latency,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ac1bdbb..0cfa7c0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -388,6 +388,7 @@
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ tp->rtt_min[0].rtt = ~0U;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 7092a61..7e538f7 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -209,7 +209,7 @@
/* alpha = (1 - g) * alpha + g * F */
- alpha -= alpha >> dctcp_shift_g;
+ alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
if (bytes_ecn) {
/* If dctcp_shift_g == 1, a 32bit value would overflow
* after 8 Mbytes.
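
/* Illustrative sketch (not part of the patch): why the plain shift can
 * leave alpha stuck above zero. With dctcp_shift_g == 4, any alpha in
 * 1..15 shifts to 0, so "alpha -= alpha >> 4" stops decaying; the
 * min_not_zero() form subtracts alpha itself in that case and reaches 0.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t min_not_zero(uint32_t x, uint32_t y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	uint32_t alpha = 9, shift_g = 4;

	printf("old: %u\n", alpha - (alpha >> shift_g));	/* 9: stuck */
	printf("new: %u\n", alpha - min_not_zero(alpha, alpha >> shift_g));
	return 0;	/* new: 0 */
}
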
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 93396bf..55be6ac 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -133,12 +133,14 @@
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
struct sock *child;
u32 end_seq;
+ bool own_req;
req->num_retrans = 0;
req->num_timeout = 0;
req->sk = NULL;
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ NULL, &own_req);
if (!child)
return NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 944eaca..fdd88c3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -95,6 +95,7 @@
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
+int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
int sysctl_tcp_thin_dupack __read_mostly;
@@ -880,6 +881,7 @@
if (metric > 0)
tcp_disable_early_retrans(tp);
+ tp->rack.reord = 1;
}
/* This must be called before lost_out is incremented */
@@ -905,8 +907,7 @@
}
}
-static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
- struct sk_buff *skb)
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);
@@ -1047,70 +1048,6 @@
return !before(start_seq, end_seq - tp->max_window);
}
-/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "B". Later note: FACK people cheated me again 8), we have to account
- * for reordering! Ugly, but should help.
- *
- * Search retransmitted skbs from write_queue that were sent when snd_nxt was
- * less than what is now known to be received by the other end (derived from
- * highest SACK block). Also calculate the lowest snd_nxt among the remaining
- * retransmitted skbs to avoid some costly processing per ACKs.
- */
-static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
- int cnt = 0;
- u32 new_low_seq = tp->snd_nxt;
- u32 received_upto = tcp_highest_sack_seq(tp);
-
- if (!tcp_is_fack(tp) || !tp->retrans_out ||
- !after(received_upto, tp->lost_retrans_low) ||
- icsk->icsk_ca_state != TCP_CA_Recovery)
- return;
-
- tcp_for_write_queue(skb, sk) {
- u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
-
- if (skb == tcp_send_head(sk))
- break;
- if (cnt == tp->retrans_out)
- break;
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
- continue;
-
- if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
- continue;
-
- /* TODO: We would like to get rid of tcp_is_fack(tp) only
- * constraint here (see above) but figuring out that at
- * least tp->reordering SACK blocks reside between ack_seq
- * and received_upto is not easy task to do cheaply with
- * the available datastructures.
- *
- * Whether FACK should check here for tp->reordering segs
- * in-between one could argue for either way (it would be
- * rather simple to implement as we could count fack_count
- * during the walk and do tp->fackets_out - fack_count).
- */
- if (after(received_upto, ack_seq)) {
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
- *flag |= FLAG_LOST_RETRANS;
- tcp_skb_mark_lost_uncond_verify(tp, skb);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
- } else {
- if (before(ack_seq, new_low_seq))
- new_low_seq = ack_seq;
- cnt += tcp_skb_pcount(skb);
- }
- }
-
- if (tp->retrans_out)
- tp->lost_retrans_low = new_low_seq;
-}
-
static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
@@ -1236,6 +1173,8 @@
return sacked;
if (!(sacked & TCPCB_SACKED_ACKED)) {
+ tcp_rack_advance(tp, xmit_time, sacked);
+
if (sacked & TCPCB_SACKED_RETRANS) {
/* If the segment is not tagged as lost,
* we do not clear RETRANS, believing
@@ -1837,7 +1776,6 @@
((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
- tcp_mark_lost_retrans(sk, &state->flag);
tcp_verify_left_out(tp);
out:
@@ -2314,14 +2252,29 @@
tp->snd_cwnd_stamp = tcp_time_stamp;
}
+static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
+{
+ return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+ before(tp->rx_opt.rcv_tsecr, when);
+}
+
+/* skb is spuriously retransmitted if the returned timestamp echo
+ * reply is prior to the skb transmission time
+ */
+static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
+ const struct sk_buff *skb)
+{
+ return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
+ tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
+}
+
/* Nothing was retransmitted or returned timestamp is less
* than timestamp of the first retransmission.
*/
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
- (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
+ tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
}
/* Undo procedures. */
@@ -2853,6 +2806,11 @@
}
}
+ /* Use RACK to detect loss */
+ if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
+ tcp_rack_mark_lost(sk))
+ flag |= FLAG_LOST_RETRANS;
+
/* E. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
@@ -2915,8 +2873,69 @@
tcp_xmit_retransmit_queue(sk);
}
+/* Kathleen Nichols' algorithm for tracking the minimum value of
+ * a data stream over some fixed time interval. (E.g., the minimum
+ * RTT over the past five minutes.) It uses constant space and constant
+ * time per update yet almost always delivers the same minimum as an
+ * implementation that has to keep all the data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of the
+ * n'th best >= n-1'th best. It also makes sure that the three values
+ * are widely separated in the time window since that bounds the worst
+ * case error when that data is monotonically increasing over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because it
+ * has no value - the new min is <= everything else in the window by
+ * definition and it's the most recent. So we restart fresh on every new
+ * min and overwrite the 2nd & 3rd choices. The same property holds for
+ * the 2nd & 3rd best.
+ */
+static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
+{
+ const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
+ struct rtt_meas *m = tcp_sk(sk)->rtt_min;
+ struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
+ u32 elapsed;
+
+ /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
+ if (unlikely(rttm.rtt <= m[0].rtt))
+ m[0] = m[1] = m[2] = rttm;
+ else if (rttm.rtt <= m[1].rtt)
+ m[1] = m[2] = rttm;
+ else if (rttm.rtt <= m[2].rtt)
+ m[2] = rttm;
+
+ elapsed = now - m[0].ts;
+ if (unlikely(elapsed > wlen)) {
+		/* Passed the entire window without a new min so make the 2nd
+		 * choice the new min & the 3rd choice the new 2nd, and so on.
+ */
+ m[0] = m[1];
+ m[1] = m[2];
+ m[2] = rttm;
+ if (now - m[0].ts > wlen) {
+ m[0] = m[1];
+ m[1] = rttm;
+ if (now - m[0].ts > wlen)
+ m[0] = rttm;
+ }
+ } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
+ /* Passed a quarter of the window without a new min so
+ * take 2nd choice from the 2nd quarter of the window.
+ */
+ m[2] = m[1] = rttm;
+ } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
+ /* Passed half the window without a new min so take the 3rd
+ * choice from the last half of the window.
+ */
+ m[2] = rttm;
+ }
+}
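
/* Illustrative sketch (not part of the patch, and simplified: the nested
 * expiry cascade after a window shift is omitted): the same windowed
 * min-filter fed a rising RTT trace. Three candidates in constant space
 * keep the reported minimum within one window of the true minimum.
 */
#include <stdint.h>
#include <stdio.h>

struct meas { uint32_t rtt, ts; };

static void win_min_update(struct meas m[3], uint32_t rtt, uint32_t now,
			   uint32_t wlen)
{
	struct meas s = { rtt ? rtt : 1, now };

	if (s.rtt <= m[0].rtt)
		m[0] = m[1] = m[2] = s;
	else if (s.rtt <= m[1].rtt)
		m[1] = m[2] = s;
	else if (s.rtt <= m[2].rtt)
		m[2] = s;

	if (now - m[0].ts > wlen) {		/* 1st choice expired */
		m[0] = m[1];
		m[1] = m[2];
		m[2] = s;
	} else if (m[1].ts == m[0].ts && now - m[0].ts > wlen / 4) {
		m[2] = m[1] = s;
	} else if (m[2].ts == m[1].ts && now - m[0].ts > wlen / 2) {
		m[2] = s;
	}
}

int main(void)
{
	struct meas m[3] = { { ~0U, 0 }, { ~0U, 0 }, { ~0U, 0 } };
	uint32_t t;

	for (t = 0; t < 40; t++)	/* RTT creeps up 5us per tick */
		win_min_update(m, 100 + t * 5, t, 10);
	printf("windowed min: %u\n", m[0].rtt);	/* tracks recent samples */
	return 0;
}
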
+
static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
- long seq_rtt_us, long sack_rtt_us)
+ long seq_rtt_us, long sack_rtt_us,
+ long ca_rtt_us)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -2925,9 +2944,6 @@
* Karn's algorithm forbids taking RTT if some retransmitted data
* is acked (RFC6298).
*/
- if (flag & FLAG_RETRANS_DATA_ACKED)
- seq_rtt_us = -1L;
-
if (seq_rtt_us < 0)
seq_rtt_us = sack_rtt_us;
@@ -2939,11 +2955,16 @@
*/
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
flag & FLAG_ACKED)
- seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-
+ seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
+ tp->rx_opt.rcv_tsecr);
if (seq_rtt_us < 0)
return false;
+ /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
+ * always taken together with ACK, SACK, or TS-opts. Any negative
+ * values will be skipped with the seq_rtt_us < 0 check above.
+ */
+ tcp_update_rtt_min(sk, ca_rtt_us);
tcp_rtt_estimator(sk, seq_rtt_us);
tcp_set_rto(sk);
@@ -2964,7 +2985,7 @@
rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
}
- tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L);
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
}
@@ -3131,6 +3152,8 @@
if (sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= acked_pcount;
+ else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
+ tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3169,7 +3192,7 @@
flag |= FLAG_SACK_RENEGING;
skb_mstamp_get(&now);
- if (likely(first_ackt.v64)) {
+ if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
}
@@ -3178,7 +3201,8 @@
ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
}
- rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
+ rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
+ ca_rtt_us);
if (flag & FLAG_ACKED) {
tcp_rearm_rto(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 30dd45c..1c2648b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1247,7 +1247,9 @@
*/
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
@@ -1323,7 +1325,7 @@
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
- __inet_hash_nolisten(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 41828bd..3575dd1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -470,6 +470,7 @@
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ newtp->rtt_min[0].rtt = ~0U;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
@@ -547,6 +548,8 @@
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
+ newtp->rack.mstamp.v64 = 0;
+ newtp->rack.advanced = 0;
newtp->saved_syn = req->saved_syn;
req->saved_syn = NULL;
@@ -577,6 +580,7 @@
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
bool paws_reject = false;
+ bool own_req;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
@@ -764,18 +768,14 @@
* ESTABLISHED STATE. If it will be dropped after
* socket is created, wait for troubles.
*/
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ req, &own_req);
if (!child)
goto listen_overflow;
sock_rps_save_rxhash(child, skb);
tcp_synack_rtt_meas(child, req);
- inet_csk_reqsk_queue_drop(sk, req);
- inet_csk_reqsk_queue_add(sk, req, child);
- /* Warning: caller must not call reqsk_put(req);
- * child stole last reference on it.
- */
- return child;
+ return inet_csk_complete_hashdance(sk, child, req, own_req);
listen_overflow:
if (!sysctl_tcp_abort_on_overflow) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 19adedb..f4f9793 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2655,8 +2655,6 @@
net_dbg_ratelimited("retrans_out leaked\n");
}
#endif
- if (!tp->retrans_out)
- tp->lost_retrans_low = tp->snd_nxt;
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);
@@ -2664,10 +2662,6 @@
if (!tp->retrans_stamp)
tp->retrans_stamp = tcp_skb_timestamp(skb);
- /* snd_nxt is stored to detect loss of retransmitted segment,
- * see tcp_input.c tcp_sacktag_write_queue().
- */
- TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
} else if (err != -EBUSY) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
@@ -3416,7 +3410,7 @@
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
skb_mstamp_get(&skb->skb_mstamp);
- NET_INC_STATS_BH(sock_net(sk), mib);
+ NET_INC_STATS(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
new file mode 100644
index 0000000..5353085
--- /dev/null
+++ b/net/ipv4/tcp_recovery.c
@@ -0,0 +1,109 @@
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+
+/* Marks a packet lost if some packet sent later has been (s)acked.
+ * The underlying idea is similar to the traditional dupthresh and FACK
+ * but they look at different metrics:
+ *
+ * dupthresh: 3 OOO packets delivered (packet count)
+ * FACK: sequence delta to highest sacked sequence (sequence space)
+ * RACK: sent time delta to the latest delivered packet (time domain)
+ *
+ * The advantage of RACK is that it applies to both original and
+ * retransmitted packets and is therefore robust against tail losses.
+ * Another advantage is that it is more resilient to reordering, simply
+ * allowing some "settling delay" instead of tweaking the dupthresh.
+ *
+ * The current version is only used after recovery starts but can be
+ * easily extended to detect the first loss.
+ */
+int tcp_rack_mark_lost(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ u32 reo_wnd, prior_retrans = tp->retrans_out;
+
+ if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
+ return 0;
+
+ /* Reset the advanced flag to avoid unnecessary queue scanning */
+ tp->rack.advanced = 0;
+
+ /* To be more reordering resilient, allow min_rtt/4 settling delay
+	 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
+ * RTT because reordering is often a path property and less related
+ * to queuing or delayed ACKs.
+ *
+ * TODO: measure and adapt to the observed reordering delay, and
+ * use a timer to retransmit like the delayed early retransmit.
+ */
+ reo_wnd = 1000;
+ if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+ reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
+
+ tcp_for_write_queue(skb, sk) {
+ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+
+ if (skb == tcp_send_head(sk))
+ break;
+
+ /* Skip ones already (s)acked */
+ if (!after(scb->end_seq, tp->snd_una) ||
+ scb->sacked & TCPCB_SACKED_ACKED)
+ continue;
+
+ if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
+
+ if (skb_mstamp_us_delta(&tp->rack.mstamp,
+ &skb->skb_mstamp) <= reo_wnd)
+ continue;
+
+ /* skb is lost if packet sent later is sacked */
+ tcp_skb_mark_lost_uncond_verify(tp, skb);
+ if (scb->sacked & TCPCB_SACKED_RETRANS) {
+ scb->sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= tcp_skb_pcount(skb);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPLOSTRETRANSMIT);
+ }
+ } else if (!(scb->sacked & TCPCB_RETRANS)) {
+ /* Original data are sent sequentially so stop early
+			 * because the rest are all sent after rack_sent
+ */
+ break;
+ }
+ }
+ return prior_retrans - tp->retrans_out;
+}
+
+/* Record the most recently (re)sent time among the (s)acked packets */
+void tcp_rack_advance(struct tcp_sock *tp,
+ const struct skb_mstamp *xmit_time, u8 sacked)
+{
+ if (tp->rack.mstamp.v64 &&
+ !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
+ return;
+
+ if (sacked & TCPCB_RETRANS) {
+ struct skb_mstamp now;
+
+ /* If the sacked packet was retransmitted, it's ambiguous
+ * whether the retransmission or the original (or the prior
+ * retransmission) was sacked.
+ *
+ * If the original is lost, there is no ambiguity. Otherwise
+ * we assume the original can be delayed up to aRTT + min_rtt.
+		 * The aRTT term is bounded by the fast recovery or timeout,
+ * so it's at least one RTT (i.e., retransmission is at least
+ * an RTT later).
+ */
+ skb_mstamp_get(&now);
+ if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+ return;
+ }
+
+ tp->rack.mstamp = *xmit_time;
+ tp->rack.advanced = 1;
+}
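
/* Illustrative sketch (not part of the patch): the core RACK decision in
 * isolation. A still-outstanding packet is deemed lost once some packet
 * sent more than reo_wnd microseconds later has been (s)acked.
 */
#include <stdint.h>
#include <stdio.h>

static int rack_lost(uint64_t skb_xmit_us, uint64_t rack_xmit_us,
		     uint32_t reo_wnd_us)
{
	return rack_xmit_us > skb_xmit_us &&
	       rack_xmit_us - skb_xmit_us > reo_wnd_us;
}

int main(void)
{
	/* P1 sent at t=1000us; P2 sent at t=3000us was SACKed. With a
	 * 1000us settling window, P1 is declared lost ...
	 */
	printf("P1 lost: %d\n", rack_lost(1000, 3000, 1000));
	/* ... while P3, sent at t=2500us, is within the window, so
	 * reordering is still plausible and it is left alone.
	 */
	printf("P3 lost: %d\n", rack_lost(2500, 3000, 1000));
	return 0;
}
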
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 9f298d0..7ee6518 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -30,6 +30,8 @@
mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu) {
+ skb->protocol = htons(ETH_P_IP);
+
if (skb->sk)
xfrm_local_error(skb, mtu);
else
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d135350..d72fa90 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3147,6 +3147,32 @@
}
break;
+ case NETDEV_CHANGEMTU:
+ /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
+ if (dev->mtu < IPV6_MIN_MTU) {
+ addrconf_ifdown(dev, 1);
+ break;
+ }
+
+ if (idev) {
+ rt6_mtu_change(dev, dev->mtu);
+ idev->cnf.mtu6 = dev->mtu;
+ break;
+ }
+
+ /* allocate new idev */
+ idev = ipv6_add_dev(dev);
+ if (IS_ERR(idev))
+ break;
+
+ /* device is still not ready */
+ if (!(idev->if_flags & IF_READY))
+ break;
+
+ run_pending = 1;
+
+ /* fall through */
+
case NETDEV_UP:
case NETDEV_CHANGE:
if (dev->flags & IFF_SLAVE)
@@ -3170,7 +3196,7 @@
idev->if_flags |= IF_READY;
run_pending = 1;
}
- } else {
+ } else if (event == NETDEV_CHANGE) {
if (!addrconf_qdisc_ok(dev)) {
/* device is still not ready. */
break;
@@ -3235,24 +3261,6 @@
}
break;
- case NETDEV_CHANGEMTU:
- if (idev && dev->mtu >= IPV6_MIN_MTU) {
- rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
- break;
- }
-
- if (!idev && dev->mtu >= IPV6_MIN_MTU) {
- idev = ipv6_add_dev(dev);
- if (!IS_ERR(idev))
- break;
- }
-
- /*
- * if MTU under IPV6_MIN_MTU.
- * Stop IPv6 on this interface.
- */
-
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
/*
@@ -4788,7 +4796,8 @@
return -EMSGSIZE;
}
-static size_t inet6_get_link_af_size(const struct net_device *dev)
+static size_t inet6_get_link_af_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
if (!__in6_dev_get(dev))
return 0;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 9f777ec..ed33abf 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,6 +32,7 @@
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
+ struct rt6_info *rt;
struct fib_lookup_arg arg = {
.lookup_ptr = lookup,
.flags = FIB_LOOKUP_NOREF,
@@ -40,11 +41,21 @@
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
- if (arg.result)
- return arg.result;
+ rt = arg.result;
- dst_hold(&net->ipv6.ip6_null_entry->dst);
- return &net->ipv6.ip6_null_entry->dst;
+ if (!rt) {
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
+ }
+
+ if (rt->rt6i_flags & RTF_REJECT &&
+ rt->dst.error == -EAGAIN) {
+ ip6_rt_put(rt);
+ rt = net->ipv6.ip6_null_entry;
+ dst_hold(&rt->dst);
+ }
+
+ return &rt->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index efb1c00..36c5a98 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -453,7 +453,8 @@
* and anycast addresses will be checked later.
*/
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
- net_dbg_ratelimited("icmp6_send: addr_any/mcast source\n");
+ net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
+ &hdr->saddr, &hdr->daddr);
return;
}
@@ -461,7 +462,8 @@
* Never answer to a ICMP packet.
*/
if (is_ineligible(skb)) {
- net_dbg_ratelimited("icmp6_send: no reply to icmp error\n");
+ net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
+ &hdr->saddr, &hdr->daddr);
return;
}
@@ -513,7 +515,8 @@
len = skb->len - msg.offset;
len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
if (len < 0) {
- net_dbg_ratelimited("icmp: len problem\n");
+ net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
+ &hdr->saddr, &hdr->daddr);
goto out_dst_release;
}
@@ -785,7 +788,8 @@
if (type & ICMPV6_INFOMSG_MASK)
break;
- net_dbg_ratelimited("icmpv6: msg of unknown type\n");
+ net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n",
+ saddr, daddr);
/*
* error of unknown type.
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 09fddf7..0c7e276 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -286,7 +286,17 @@
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
- return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+ struct rt6_info *rt;
+
+ rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+ if (rt->rt6i_flags & RTF_REJECT &&
+ rt->dst.error == -EAGAIN) {
+ ip6_rt_put(rt);
+ rt = net->ipv6.ip6_null_entry;
+ dst_hold(&rt->dst);
+ }
+
+ return &rt->dst;
}
static void __net_init fib6_tables_init(struct net *net)
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 08b6204..eeca943 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -264,6 +264,9 @@
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
int err = -ENOSYS;
+ if (skb->encapsulation)
+ skb_set_inner_network_header(skb, nhoff);
+
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
rcu_read_lock();
@@ -280,6 +283,13 @@
return err;
}
+static int sit_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ skb->encapsulation = 1;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SIT;
+ return ipv6_gro_complete(skb, nhoff);
+}
+
static struct packet_offload ipv6_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IPV6),
.callbacks = {
@@ -292,6 +302,8 @@
static const struct net_offload sit_offload = {
.callbacks = {
.gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = sit_gro_complete,
},
};
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0c89671..c265068 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/overflow-arith.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
@@ -596,7 +597,10 @@
if (np->frag_size)
mtu = np->frag_size;
}
- mtu -= hlen + sizeof(struct frag_hdr);
+
+ if (overflow_usub(mtu, hlen + sizeof(struct frag_hdr), &mtu) ||
+ mtu <= 7)
+ goto fail_toobig;
frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr);
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 96833e4..f6a024e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -58,6 +58,7 @@
config NF_DUP_IPV6
tristate "Netfilter IPv6 packet duplication to alternate destination"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
help
This option enables the nf_dup_ipv6 core, which duplicates an IPv6
packet to be rerouted to another destination.
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d061963..2701cb3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1171,6 +1171,7 @@
{
struct dst_entry *dst;
int flags = 0;
+ bool any_src;
dst = l3mdev_rt6_dst_by_oif(net, fl6);
if (dst)
@@ -1178,11 +1179,12 @@
fl6->flowi6_iif = LOOPBACK_IFINDEX;
+ any_src = ipv6_addr_any(&fl6->saddr);
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
- fl6->flowi6_oif)
+ (fl6->flowi6_oif && any_src))
flags |= RT6_LOOKUP_F_IFACE;
- if (!ipv6_addr_any(&fl6->saddr))
+ if (!any_src)
flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)
flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f495d18..714bc5a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -965,7 +965,9 @@
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp;
@@ -984,7 +986,8 @@
* v6 mapped
*/
- newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
+ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
+ req_unhash, own_req);
if (!newsk)
return NULL;
@@ -1145,7 +1148,7 @@
tcp_done(newsk);
goto out;
}
- __inet_hash(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
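This is one instance of a tree-wide signature change: syn_recv_sock() now hashes the child and unhashes the request socket in a single inet_ehash_nolisten() call, reporting through *own_req whether this invocation actually won the ehash slot. A condensed, hypothetical caller sketch (names abbreviated; the real consumer is the request-sock completion path, which reacts roughly like this):

    bool own_req = false;
    struct sock *child;

    child = af_ops->syn_recv_sock(sk, skb, req, NULL, req, &own_req);
    if (child && !own_req) {
        /* A concurrent path already installed a socket for this
         * connection; this child never entered the hash table.
         */
        sock_put(child);
        child = NULL;
    }

Doing the insert and the unhash under one hash-bucket critical section closes the window in which two sockets for the same connection could briefly coexist in the established hash.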
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 9db067a..4d09ce6 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -79,6 +79,7 @@
if (!skb->ignore_df && skb->len > mtu) {
skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu);
@@ -143,6 +144,7 @@
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
int mtu;
+ bool toobig;
#ifdef CONFIG_NETFILTER
if (!x) {
@@ -151,25 +153,29 @@
}
#endif
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ goto skip_frag;
+
if (skb->protocol == htons(ETH_P_IPV6))
mtu = ip6_skb_dst_mtu(skb);
else
mtu = dst_mtu(skb_dst(skb));
- if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+ toobig = skb->len > mtu && !skb_is_gso(skb);
+
+ if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE;
- } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
+ } else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
return -EMSGSIZE;
}
- if (x->props.mode == XFRM_MODE_TUNNEL &&
- ((skb->len > mtu && !skb_is_gso(skb)) ||
- dst_allfrag(skb_dst(skb)))) {
+ if (toobig || dst_allfrag(skb_dst(skb)))
return ip6_fragment(net, sk, skb,
__xfrm6_output_finish);
- }
+
+skip_frag:
return x->outer_mode->afinfo->output_finish(sk, skb);
}
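The rewrite changes two things at once: transport-mode packets now skip the fragmentation logic entirely (skip_frag), and the size test is computed once with GSO frames excluded, since those will be segmented to fit further down the stack. A boolean model of the resulting decision tree, with the kernel state reduced to flags (illustrative only, not the kernel function):

    enum action { OUTPUT_FINISH, REPORT_PMTU, LOCAL_ERROR, FRAGMENT };

    static enum action xfrm6_decide(bool tunnel_mode, bool toobig,
                                    bool dontfrag, bool ignore_df,
                                    bool has_sk, bool allfrag)
    {
        if (!tunnel_mode)
            return OUTPUT_FINISH;          /* skip_frag */
        if (toobig && dontfrag)
            return REPORT_PMTU;            /* -EMSGSIZE via rxpmtu */
        if (!ignore_df && toobig && has_sk)
            return LOCAL_ERROR;            /* -EMSGSIZE */
        if (toobig || allfrag)
            return FRAGMENT;               /* ip6_fragment() */
        return OUTPUT_FINISH;
    }

where toobig already means skb->len > mtu && !skb_is_gso(skb).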
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 08c9c93..2cc5840 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -177,7 +177,8 @@
return;
case IPPROTO_ICMPV6:
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+ if (!onlyproto && (nh + offset + 2 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
u8 *icmp;
nh = skb_network_header(skb);
@@ -191,7 +192,8 @@
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
offset += ipv6_optlen(exthdr);
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+ if (!onlyproto && (nh + offset + 3 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
struct ip6_mh *mh;
nh = skb_network_header(skb);
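The added pointer test matters because earlier pulls can leave skb->data beyond the header being examined, making nh + offset + N - skb->data negative; pskb_may_pull() takes an unsigned length, so the negative difference would wrap to a huge value and the check would spuriously fail even though the bytes are already linear. A small hypothetical helper expressing the combined test (kernel context assumed):

    static bool hdr_bytes_available(const u8 *nh, int need,
                                    struct sk_buff *skb)
    {
        /* Either the bytes already precede skb->data (pulled
         * earlier, still in the linear area), or pull them in.
         */
        return nh + need < skb->data ||
               pskb_may_pull(skb, nh + need - skb->data);
    }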
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index a26c401..4396459 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1839,7 +1839,7 @@
for (element = hashbin_get_first(iter->hashbin);
element != NULL;
element = hashbin_get_next(iter->hashbin)) {
- if (!off || *off-- == 0) {
+ if (!off || (*off)-- == 0) {
/* NB: hashbin left locked */
return element;
}
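A one-character fix, but a classic C precedence trap: postfix -- binds tighter than unary *, so *off-- parses as *(off--), stepping the pointer backwards and never touching the count it points at. Illustration:

    loff_t n = 2;
    loff_t *off = &n;

    /* Buggy: *off-- moves the pointer one element back and leaves
     * n untouched, so the seek offset never counts down.
     * Fixed: (*off)-- compares the old value (2) and stores 1.
     */
    if ((*off)-- == 0)
        ;   /* reached only once the offset has counted down to 0 */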
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 83a7068..f9c9ecb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -261,7 +261,7 @@
err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
- /* Error is cleare after succecful sending to at least one
+ /* Error is cleared after successful sending to at least one
* registered KM */
if ((broadcast_flags & BROADCAST_REGISTERED) && err)
err = err2;
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 783e891..f9137a8 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -27,7 +27,6 @@
key.o \
util.o \
wme.o \
- event.o \
chan.o \
trace.o mlme.o \
tdls.o \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 68e551e..713cdbf 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -17,7 +17,6 @@
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
-#include "cfg.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"
@@ -469,45 +468,6 @@
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
-void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
-{
- rinfo->flags = 0;
-
- if (sta->last_rx_rate_flag & RX_FLAG_HT) {
- rinfo->flags |= RATE_INFO_FLAGS_MCS;
- rinfo->mcs = sta->last_rx_rate_idx;
- } else if (sta->last_rx_rate_flag & RX_FLAG_VHT) {
- rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
- rinfo->nss = sta->last_rx_rate_vht_nss;
- rinfo->mcs = sta->last_rx_rate_idx;
- } else {
- struct ieee80211_supported_band *sband;
- int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
- u16 brate;
-
- sband = sta->local->hw.wiphy->bands[
- ieee80211_get_sdata_band(sta->sdata)];
- brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
- rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
- }
-
- if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-
- if (sta->last_rx_rate_flag & RX_FLAG_5MHZ)
- rinfo->bw = RATE_INFO_BW_5;
- else if (sta->last_rx_rate_flag & RX_FLAG_10MHZ)
- rinfo->bw = RATE_INFO_BW_10;
- else if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
- rinfo->bw = RATE_INFO_BW_40;
- else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
- rinfo->bw = RATE_INFO_BW_80;
- else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
- rinfo->bw = RATE_INFO_BW_160;
- else
- rinfo->bw = RATE_INFO_BW_20;
-}
-
static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
@@ -1138,6 +1098,7 @@
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
+ sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
if (set & BIT(NL80211_STA_FLAG_MFP))
set_sta_flag(sta, WLAN_STA_MFP);
else
@@ -1427,7 +1388,7 @@
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
ieee80211_recalc_ps_vif(sdata);
}
@@ -2462,7 +2423,7 @@
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
ieee80211_recalc_ps_vif(sdata);
return 0;
diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
deleted file mode 100644
index 2d51f62..0000000
--- a/net/mac80211/cfg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * mac80211 configuration hooks for cfg80211
- */
-#ifndef __CFG_H
-#define __CFG_H
-
-extern const struct cfg80211_ops mac80211_config_ops;
-
-#endif /* __CFG_H */
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 06d5293..a39512f 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -50,7 +50,6 @@
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
-STA_FILE(last_ack_signal, last_ack_signal, D);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -366,11 +365,10 @@
DEBUGFS_ADD(agg_status);
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD(vht_capa);
- DEBUGFS_ADD(last_ack_signal);
- DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
- DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
- DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
+ DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
+ DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
+ DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 188faab..9cc986d 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -40,7 +40,7 @@
"rx_duplicates", "rx_fragments", "rx_dropped",
"tx_packets", "tx_bytes",
"tx_filtered", "tx_retry_failed", "tx_retries",
- "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
+ "sta_state", "txrate", "rxrate", "signal",
"channel", "noise", "ch_time", "ch_time_busy",
"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
};
@@ -77,20 +77,19 @@
memset(data, 0, sizeof(u64) * STA_STATS_LEN);
-#define ADD_STA_STATS(sta) \
- do { \
- data[i++] += sta->rx_packets; \
- data[i++] += sta->rx_bytes; \
- data[i++] += sta->num_duplicates; \
- data[i++] += sta->rx_fragments; \
- data[i++] += sta->rx_dropped; \
- \
- data[i++] += sinfo.tx_packets; \
- data[i++] += sinfo.tx_bytes; \
- data[i++] += sta->tx_filtered_count; \
- data[i++] += sta->tx_retry_failed; \
- data[i++] += sta->tx_retry_count; \
- data[i++] += sta->beacon_loss_count; \
+#define ADD_STA_STATS(sta) \
+ do { \
+ data[i++] += sta->rx_stats.packets; \
+ data[i++] += sta->rx_stats.bytes; \
+ data[i++] += sta->rx_stats.num_duplicates; \
+ data[i++] += sta->rx_stats.fragments; \
+ data[i++] += sta->rx_stats.dropped; \
+ \
+ data[i++] += sinfo.tx_packets; \
+ data[i++] += sinfo.tx_bytes; \
+ data[i++] += sta->status_stats.filtered; \
+ data[i++] += sta->status_stats.retry_failed; \
+ data[i++] += sta->status_stats.retry_count; \
} while (0)
/* For Managed stations, find the single station based on BSSID
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
deleted file mode 100644
index 01ae759..0000000
--- a/net/mac80211/event.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * mac80211 - events
- */
-#include <net/cfg80211.h>
-#include "ieee80211_i.h"
-
-/*
- * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of
- * the frame that generated the MIC failure (i.e., if it was provided by the
- * driver or is still in the frame), it should provide that information.
- */
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
- struct ieee80211_hdr *hdr, const u8 *tsc,
- gfp_t gfp)
-{
- cfg80211_michael_mic_failure(sdata->dev, hdr->addr2,
- (hdr->addr1[0] & 0x01) ?
- NL80211_KEYTYPE_GROUP :
- NL80211_KEYTYPE_PAIRWISE,
- keyidx, tsc, gfp);
-}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 7f72bc9..2001555 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -229,7 +229,7 @@
struct cfg80211_chan_def chandef;
struct ieee80211_channel *chan;
struct beacon_data *presp;
- enum nl80211_bss_scan_width scan_width;
+ struct cfg80211_inform_bss bss_meta = {};
bool have_higher_than_11mbit;
bool radar_required;
int err;
@@ -383,10 +383,11 @@
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- scan_width = cfg80211_chandef_to_scan_width(&chandef);
- bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
- scan_width, mgmt,
- presp->head_len, 0, GFP_KERNEL);
+ bss_meta.chan = chan;
+ bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef);
+ bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt,
+ presp->head_len, GFP_KERNEL);
+
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
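This follows the cfg80211 API change visible in the DocBook hunk at the top of this merge: the growing positional argument list of cfg80211_inform_bss_width_frame() is replaced by a struct cfg80211_inform_bss that callers fill with whatever metadata they have. A hedged sketch of the new calling convention (variable names are assumptions, not taken from any one driver):

    struct cfg80211_inform_bss data = {
        .chan = channel,                        /* where it was heard */
        .scan_width = NL80211_BSS_CHAN_WIDTH_20,
        .signal = signal_dbm * 100,             /* centi-dBm, if known */
    };
    struct cfg80211_bss *bss;

    bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
                                         GFP_ATOMIC);
    if (bss)
        cfg80211_put_bss(wiphy, bss);           /* drop our reference */

New metadata fields can now be added to the struct without touching every caller again.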
@@ -646,7 +647,7 @@
return NULL;
}
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
@@ -668,7 +669,8 @@
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sta->sdata == sdata &&
- time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
+ time_after(sta->rx_stats.last_rx +
+ IEEE80211_IBSS_MERGE_INTERVAL,
jiffies)) {
active++;
break;
@@ -1234,7 +1236,7 @@
if (!sta)
return;
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
@@ -1252,7 +1254,7 @@
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
- unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+ unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
mutex_lock(&local->sta_mtx);
@@ -1260,8 +1262,8 @@
if (sdata != sta->sdata)
continue;
- if (time_after(jiffies, sta->last_rx + exp_time) ||
- (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+ if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) ||
+ (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) &&
sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
sta->sta_state != IEEE80211_STA_AUTHORIZED ?
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f9605f1..62f2a97 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -34,6 +34,8 @@
#include "sta_info.h"
#include "debug.h"
+extern const struct cfg80211_ops mac80211_config_ops;
+
struct ieee80211_local;
/* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -501,6 +503,9 @@
*/
unsigned int count_beacon_signal;
+ /* Number of times beacon loss was invoked. */
+ unsigned int beacon_loss_count;
+
/*
* Last Beacon frame signal strength average (ave_beacon_signal / 16)
* that triggered a cqm event. 0 indicates that no event has been
@@ -1305,7 +1310,6 @@
struct work_struct dynamic_ps_enable_work;
struct work_struct dynamic_ps_disable_work;
struct timer_list dynamic_ps_timer;
- struct notifier_block network_latency_notifier;
struct notifier_block ifa_notifier;
struct notifier_block ifa6_notifier;
@@ -1491,10 +1495,8 @@
struct cfg80211_disassoc_request *req);
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps(struct ieee80211_local *local);
void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
-int ieee80211_max_network_latency(struct notifier_block *nb,
- unsigned long data, void *dummy);
int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1766,9 +1768,6 @@
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
int rate, int erp, int short_preamble,
int shift);
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
- struct ieee80211_hdr *hdr, const u8 *tsc,
- gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 42d7f0f..f848c75 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -709,7 +709,7 @@
if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -1016,7 +1016,7 @@
drv_remove_interface(local, sdata);
}
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
if (cancel_scan)
flush_delayed_work(&local->scan_work);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 9b813a2..273c96d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,7 +20,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
-#include <linux/pm_qos.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -32,7 +31,6 @@
#include "mesh.h"
#include "wep.h"
#include "led.h"
-#include "cfg.h"
#include "debugfs.h"
void ieee80211_configure_filter(struct ieee80211_local *local)
@@ -1083,13 +1081,6 @@
rtnl_unlock();
- local->network_latency_notifier.notifier_call =
- ieee80211_max_network_latency;
- result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
- &local->network_latency_notifier);
- if (result)
- goto fail_pm_qos;
-
#ifdef CONFIG_INET
local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
result = register_inetaddr_notifier(&local->ifa_notifier);
@@ -1114,10 +1105,7 @@
#endif
#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
fail_ifa:
- pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
- &local->network_latency_notifier);
#endif
- fail_pm_qos:
rtnl_lock();
rate_control_deinitialize(local);
ieee80211_remove_interfaces(local);
@@ -1143,8 +1131,6 @@
tasklet_kill(&local->tx_pending_tasklet);
tasklet_kill(&local->tasklet);
- pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
- &local->network_latency_notifier);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&local->ifa_notifier);
#endif
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d80e0a4..c6be0b4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -329,7 +329,7 @@
if (sta->mesh->fail_avg >= 100)
return MAX_METRIC;
- sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
+ sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
rate = cfg80211_calculate_bitrate(&rinfo);
if (WARN_ON(!rate))
return MAX_METRIC;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a360b24..c1f8892 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -60,7 +60,9 @@
{
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
- (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
+ (sta &&
+ (s8)-ewma_signal_read(&sta->rx_stats.avg_signal) >
+ rssi_threshold);
}
/**
@@ -390,7 +392,7 @@
rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
spin_lock_bh(&sta->mesh->plink_lock);
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* rates and capabilities don't change during peering */
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 56ef9a8..ded4b97 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -20,7 +20,6 @@
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -1476,7 +1475,7 @@
}
/* need to hold RTNL or interface lock */
-void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
+void ieee80211_recalc_ps(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata, *found = NULL;
int count = 0;
@@ -1505,48 +1504,23 @@
}
if (count == 1 && ieee80211_powersave_allowed(found)) {
+ u8 dtimper = found->u.mgd.dtim_period;
s32 beaconint_us;
- if (latency < 0)
- latency = pm_qos_request(PM_QOS_NETWORK_LATENCY);
-
beaconint_us = ieee80211_tu_to_usec(
found->vif.bss_conf.beacon_int);
timeout = local->dynamic_ps_forced_timeout;
- if (timeout < 0) {
- /*
- * Go to full PSM if the user configures a very low
- * latency requirement.
- * The 2000 second value is there for compatibility
- * until the PM_QOS_NETWORK_LATENCY is configured
- * with real values.
- */
- if (latency > (1900 * USEC_PER_MSEC) &&
- latency != (2000 * USEC_PER_SEC))
- timeout = 0;
- else
- timeout = 100;
- }
+ if (timeout < 0)
+ timeout = 100;
local->hw.conf.dynamic_ps_timeout = timeout;
- if (beaconint_us > latency) {
- local->ps_sdata = NULL;
- } else {
- int maxslp = 1;
- u8 dtimper = found->u.mgd.dtim_period;
+ /* If the TIM IE is invalid, pretend the value is 1 */
+ if (!dtimper)
+ dtimper = 1;
- /* If the TIM IE is invalid, pretend the value is 1 */
- if (!dtimper)
- dtimper = 1;
- else if (dtimper > 1)
- maxslp = min_t(int, dtimper,
- latency / beaconint_us);
-
- local->hw.conf.max_sleep_period = maxslp;
- local->hw.conf.ps_dtim_period = dtimper;
- local->ps_sdata = found;
- }
+ local->hw.conf.ps_dtim_period = dtimper;
+ local->ps_sdata = found;
} else {
local->ps_sdata = NULL;
}
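For scale, the arithmetic this hunk deletes coupled powersave to the PM-QoS network-latency request. Assuming the kernel's 1 TU = 1024 us conversion:

    static inline unsigned int tu_to_usec(unsigned int tu)
    {
        return tu * 1024;   /* 1 TU = 1024 us */
    }

    /* tu_to_usec(100) == 102400 for the common 100 TU beacon interval.
     * Old behaviour: a 50 ms latency bound (50000 < 102400) disabled
     * powersave outright, while 500 ms allowed sleeping
     * min(dtim_period, 500000 / 102400) = up to 4 beacon intervals.
     * New behaviour: powersave is on whenever the single managed
     * interface allows it, the DTIM period is honoured as-is, and the
     * dynamic timeout is a flat 100 ms unless the user forced a value.
     */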
@@ -1997,7 +1971,7 @@
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
ieee80211_recalc_smps(sdata);
@@ -2165,7 +2139,7 @@
__ieee80211_stop_poll(sdata);
mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
@@ -2341,7 +2315,7 @@
goto out;
mutex_lock(&sdata->local->iflist_mtx);
- ieee80211_recalc_ps(sdata->local, -1);
+ ieee80211_recalc_ps(sdata->local);
mutex_unlock(&sdata->local->iflist_mtx);
ifmgd->probe_send_count = 0;
@@ -2446,15 +2420,9 @@
container_of(work, struct ieee80211_sub_if_data,
u.mgd.beacon_connection_loss_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct sta_info *sta;
- if (ifmgd->associated) {
- rcu_read_lock();
- sta = sta_info_get(sdata, ifmgd->bssid);
- if (sta)
- sta->beacon_loss_count++;
- rcu_read_unlock();
- }
+ if (ifmgd->associated)
+ ifmgd->beacon_loss_count++;
if (ifmgd->connection_loss) {
sdata_info(sdata, "Connection to AP %pM lost\n",
@@ -3044,8 +3012,12 @@
rate_control_rate_init(sta);
- if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
+ if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
set_sta_flag(sta, WLAN_STA_MFP);
+ sta->sta.mfp = true;
+ } else {
+ sta->sta.mfp = false;
+ }
sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
@@ -3544,7 +3516,7 @@
ifmgd->have_beacon = true;
mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
ieee80211_recalc_ps_vif(sdata);
@@ -4148,21 +4120,6 @@
rcu_read_unlock();
}
-int ieee80211_max_network_latency(struct notifier_block *nb,
- unsigned long data, void *dummy)
-{
- s32 latency_usec = (s32) data;
- struct ieee80211_local *local =
- container_of(nb, struct ieee80211_local,
- network_latency_notifier);
-
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, latency_usec);
- mutex_unlock(&local->iflist_mtx);
-
- return NOTIFY_OK;
-}
-
static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss)
{
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 573b81a..0be0aad 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -75,7 +75,7 @@
if (!sta)
return;
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* Add only mandatory rates for now */
sband = local->hw.wiphy->bands[band];
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5bc0b88..8bae5de 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1113,16 +1113,16 @@
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
- if (rx->sta) {
- if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
- rx->sta->last_seq_ctrl[rx->seqno_idx] ==
- hdr->seq_ctrl)) {
- I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
- rx->sta->num_duplicates++;
- return RX_DROP_UNUSABLE;
- } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
- rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
- }
+ if (!rx->sta)
+ return RX_CONTINUE;
+
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
+ I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
+ rx->sta->rx_stats.num_duplicates++;
+ return RX_DROP_UNUSABLE;
+ } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
+ rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
}
return RX_CONTINUE;
@@ -1396,51 +1396,56 @@
NL80211_IFTYPE_ADHOC);
if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1)) {
- sta->last_rx_rate_idx = status->rate_idx;
- sta->last_rx_rate_flag = status->flag;
- sta->last_rx_rate_vht_flag = status->vht_flag;
- sta->last_rx_rate_vht_nss = status->vht_nss;
+ sta->rx_stats.last_rate_idx =
+ status->rate_idx;
+ sta->rx_stats.last_rate_flag =
+ status->flag;
+ sta->rx_stats.last_rate_vht_flag =
+ status->vht_flag;
+ sta->rx_stats.last_rate_vht_nss =
+ status->vht_nss;
}
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
} else if (!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx if they are found to
* match the current local configuration when processed.
*/
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control)) {
- sta->last_rx_rate_idx = status->rate_idx;
- sta->last_rx_rate_flag = status->flag;
- sta->last_rx_rate_vht_flag = status->vht_flag;
- sta->last_rx_rate_vht_nss = status->vht_nss;
+ sta->rx_stats.last_rate_idx = status->rate_idx;
+ sta->rx_stats.last_rate_flag = status->flag;
+ sta->rx_stats.last_rate_vht_flag = status->vht_flag;
+ sta->rx_stats.last_rate_vht_nss = status->vht_nss;
}
}
if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_notify(rx->sdata, hdr);
- sta->rx_fragments++;
- sta->rx_bytes += rx->skb->len;
+ sta->rx_stats.fragments++;
+ sta->rx_stats.bytes += rx->skb->len;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
- sta->last_signal = status->signal;
- ewma_signal_add(&sta->avg_signal, -status->signal);
+ sta->rx_stats.last_signal = status->signal;
+ ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
}
if (status->chains) {
- sta->chains = status->chains;
+ sta->rx_stats.chains = status->chains;
for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
int signal = status->chain_signal[i];
if (!(status->chains & BIT(i)))
continue;
- sta->chain_signal_last[i] = signal;
- ewma_signal_add(&sta->chain_signal_avg[i], -signal);
+ sta->rx_stats.chain_signal_last[i] = signal;
+ ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
+ -signal);
}
}
@@ -1500,7 +1505,7 @@
* Update counter and free packet here to avoid
* counting this as a dropped packed.
*/
- sta->rx_packets++;
+ sta->rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -1922,7 +1927,7 @@
ieee80211_led_rx(rx->local);
out_no_led:
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_CONTINUE;
}
@@ -2376,7 +2381,7 @@
* for non-QoS-data frames. Here we know it's a data
* frame, so count MSDUs.
*/
- rx->sta->rx_msdu[rx->seqno_idx]++;
+ rx->sta->rx_stats.msdu[rx->seqno_idx]++;
}
/*
@@ -2413,7 +2418,7 @@
skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
schedule_work(&local->tdls_chsw_work);
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_QUEUED;
}
@@ -2875,7 +2880,7 @@
handled:
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
@@ -2884,7 +2889,7 @@
skb_queue_tail(&sdata->skb_queue, rx->skb);
ieee80211_queue_work(&local->hw, &sdata->work);
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_QUEUED;
}
@@ -2911,7 +2916,7 @@
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
rx->skb->data, rx->skb->len, 0)) {
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -3030,7 +3035,7 @@
skb_queue_tail(&sdata->skb_queue, rx->skb);
ieee80211_queue_work(&rx->local->hw, &sdata->work);
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_QUEUED;
}
@@ -3112,7 +3117,7 @@
case RX_DROP_MONITOR:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_dropped++;
+ rx->sta->rx_stats.dropped++;
/* fall through */
case RX_CONTINUE: {
struct ieee80211_rate *rate = NULL;
@@ -3132,7 +3137,7 @@
case RX_DROP_UNUSABLE:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_dropped++;
+ rx->sta->rx_stats.dropped++;
dev_kfree_skb(rx->skb);
break;
case RX_QUEUED:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 11d0901..b64fd2b 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -16,7 +16,6 @@
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -67,24 +66,23 @@
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen, srlen;
- enum nl80211_bss_scan_width scan_width;
- s32 signal = 0;
+ struct cfg80211_inform_bss bss_meta = {};
bool signal_valid;
if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
- signal = rx_status->signal * 100;
+ bss_meta.signal = rx_status->signal * 100;
else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
- signal = (rx_status->signal * 100) / local->hw.max_signal;
+ bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
- scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
if (rx_status->flag & RX_FLAG_5MHZ)
- scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
if (rx_status->flag & RX_FLAG_10MHZ)
- scan_width = NL80211_BSS_CHAN_WIDTH_10;
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
- cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
- scan_width, mgmt, len, signal,
- GFP_ATOMIC);
+ bss_meta.chan = channel;
+ cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
+ mgmt, len, GFP_ATOMIC);
if (!cbss)
return NULL;
/* In case the signal is invalid update the status */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c364445..f91d187 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -331,7 +331,7 @@
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
sta->sdata = sdata;
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
sta->sta_state = IEEE80211_STA_NONE;
@@ -339,9 +339,9 @@
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
sta->last_connected = ktime_get_seconds();
- ewma_signal_init(&sta->avg_signal);
- for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
- ewma_signal_init(&sta->chain_signal_avg[i]);
+ ewma_signal_init(&sta->rx_stats.avg_signal);
+ for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++)
+ ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
@@ -1066,7 +1066,7 @@
if (sdata != sta->sdata)
continue;
- if (time_after(jiffies, sta->last_rx + exp_time)) {
+ if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) {
sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
sta->sta.addr);
@@ -1806,6 +1806,45 @@
>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}
+static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+{
+ rinfo->flags = 0;
+
+ if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = sta->rx_stats.last_rate_idx;
+ } else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->nss = sta->rx_stats.last_rate_vht_nss;
+ rinfo->mcs = sta->rx_stats.last_rate_idx;
+ } else {
+ struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
+
+ sband = sta->local->hw.wiphy->bands[
+ ieee80211_get_sdata_band(sta->sdata)];
+ brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ }
+
+ if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ)
+ rinfo->bw = RATE_INFO_BW_5;
+ else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ)
+ rinfo->bw = RATE_INFO_BW_10;
+ else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ)
+ rinfo->bw = RATE_INFO_BW_40;
+ else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ)
+ rinfo->bw = RATE_INFO_BW_80;
+ else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ)
+ rinfo->bw = RATE_INFO_BW_160;
+ else
+ rinfo->bw = RATE_INFO_BW_20;
+}
+
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -1832,50 +1871,54 @@
BIT(NL80211_STA_INFO_STA_FLAGS) |
BIT(NL80211_STA_INFO_BSS_PARAM) |
BIT(NL80211_STA_INFO_CONNECTED_TIME) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_BEACON_LOSS);
+ BIT(NL80211_STA_INFO_RX_DROP_MISC);
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS);
+ }
sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
- sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
+ sinfo->inactive_time =
+ jiffies_to_msecs(jiffies - sta->rx_stats.last_rx);
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
BIT(NL80211_STA_INFO_TX_BYTES)))) {
sinfo->tx_bytes = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- sinfo->tx_bytes += sta->tx_bytes[ac];
+ sinfo->tx_bytes += sta->tx_stats.bytes[ac];
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) {
sinfo->tx_packets = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- sinfo->tx_packets += sta->tx_packets[ac];
+ sinfo->tx_packets += sta->tx_stats.packets[ac];
sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
}
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
BIT(NL80211_STA_INFO_RX_BYTES)))) {
- sinfo->rx_bytes = sta->rx_bytes;
+ sinfo->rx_bytes = sta->rx_stats.bytes;
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
- sinfo->rx_packets = sta->rx_packets;
+ sinfo->rx_packets = sta->rx_stats.packets;
sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) {
- sinfo->tx_retries = sta->tx_retry_count;
+ sinfo->tx_retries = sta->status_stats.retry_count;
sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) {
- sinfo->tx_failed = sta->tx_retry_failed;
+ sinfo->tx_failed = sta->status_stats.retry_failed;
sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
}
- sinfo->rx_dropped_misc = sta->rx_dropped;
- sinfo->beacon_loss_count = sta->beacon_loss_count;
+ sinfo->rx_dropped_misc = sta->rx_stats.dropped;
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
!(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
@@ -1887,33 +1930,35 @@
if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
- sinfo->signal = (s8)sta->last_signal;
+ sinfo->signal = (s8)sta->rx_stats.last_signal;
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
sinfo->signal_avg =
- (s8) -ewma_signal_read(&sta->avg_signal);
+ -ewma_signal_read(&sta->rx_stats.avg_signal);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
}
}
- if (sta->chains &&
+ if (sta->rx_stats.chains &&
!(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
- sinfo->chains = sta->chains;
+ sinfo->chains = sta->rx_stats.chains;
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
- sinfo->chain_signal[i] = sta->chain_signal_last[i];
+ sinfo->chain_signal[i] =
+ sta->rx_stats.chain_signal_last[i];
sinfo->chain_signal_avg[i] =
- (s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
+ -ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]);
}
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) {
- sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
+ sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
+ &sinfo->txrate);
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
}
@@ -1928,12 +1973,12 @@
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
- tidstats->rx_msdu = sta->rx_msdu[i];
+ tidstats->rx_msdu = sta->rx_stats.msdu[i];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
- tidstats->tx_msdu = sta->tx_msdu[i];
+ tidstats->tx_msdu = sta->tx_stats.msdu[i];
}
if (!(tidstats->filled &
@@ -1941,7 +1986,8 @@
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |=
BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
- tidstats->tx_msdu_retries = sta->tx_msdu_retries[i];
+ tidstats->tx_msdu_retries =
+ sta->status_stats.msdu_retries[i];
}
if (!(tidstats->filled &
@@ -1949,7 +1995,8 @@
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |=
BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
- tidstats->tx_msdu_failed = sta->tx_msdu_failed[i];
+ tidstats->tx_msdu_failed =
+ sta->status_stats.msdu_failed[i];
}
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d5ded87..2cafb21 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -344,12 +344,6 @@
* @rate_ctrl_lock: spinlock used to protect rate control data
* (data inside the algorithm, so serializes calls there)
* @rate_ctrl_priv: rate control private per-STA pointer
- * @last_tx_rate: rate used for last transmit, to report to userspace as
- * "the" transmit rate
- * @last_rx_rate_idx: rx status rate index of the last data packet
- * @last_rx_rate_flag: rx status flag of the last data packet
- * @last_rx_rate_vht_flag: rx status vht flag of the last data packet
- * @last_rx_rate_vht_nss: rx status nss of last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
* @drv_deliver_wk: used for delivering frames after driver PS unblocking
@@ -364,23 +358,9 @@
* the station when it leaves powersave or polls for frames
* @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
* @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on
- * @rx_packets: Number of MSDUs received from this STA
- * @rx_bytes: Number of bytes received from this STA
- * @last_rx: time (in jiffies) when last frame was received from this STA
* @last_connected: time (in seconds) when a station got connected
- * @num_duplicates: number of duplicate frames received from this STA
- * @rx_fragments: number of received MPDUs
- * @rx_dropped: number of dropped MPDUs from this STA
- * @last_signal: signal of last received frame from this STA
- * @avg_signal: moving average of signal of received frames from this STA
- * @last_ack_signal: signal of last received Ack frame from this STA
* @last_seq_ctrl: last received seq/frag number from this STA (per TID
* plus one for non-QoS frames)
- * @tx_filtered_count: number of frames the hardware filtered for this STA
- * @tx_retry_failed: number of frames that failed retry
- * @tx_retry_count: total number of retries for frames to this STA
- * @tx_packets: number of RX/TX MSDUs
- * @tx_bytes: number of bytes transmitted to this STA
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
@@ -388,32 +368,22 @@
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
- * @lost_packets: number of consecutive lost packets
* @sta: station information we share with the driver
* @sta_state: duplicates information about station state (for debug)
* @beacon_loss_count: number of times beacon loss has triggered
* @rcu_head: RCU head used for freeing this station struct
* @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
* taken from HT/VHT capabilities or VHT operating mode notification
- * @chains: chains ever used for RX from this station
- * @chain_signal_last: last signal (per chain)
- * @chain_signal_avg: signal average (per chain)
* @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
* AP only.
* @cipher_scheme: optional cipher scheme for this station
- * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
- * @tx_msdu: MSDUs transmitted to this station, using IEEE80211_NUM_TID
- * entry for non-QoS frames
- * @tx_msdu_retries: MSDU retries for transmissions to to this station,
- * using IEEE80211_NUM_TID entry for non-QoS frames
- * @tx_msdu_failed: MSDU failures for transmissions to to this station,
- * using IEEE80211_NUM_TID entry for non-QoS frames
- * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
- * entry for non-QoS frames
* @fast_tx: TX fastpath information
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
* the BSS one.
+ * @tx_stats: TX statistics
+ * @rx_stats: RX statistics
+ * @status_stats: TX status statistics
*/
struct sta_info {
/* General information, mostly static */
@@ -457,42 +427,49 @@
unsigned long driver_buffered_tids;
unsigned long txq_buffered_tids;
- /* Updated from RX path only, no locking requirements */
- unsigned long rx_packets;
- u64 rx_bytes;
- unsigned long last_rx;
long last_connected;
- unsigned long num_duplicates;
- unsigned long rx_fragments;
- unsigned long rx_dropped;
- int last_signal;
- struct ewma_signal avg_signal;
- int last_ack_signal;
- u8 chains;
- s8 chain_signal_last[IEEE80211_MAX_CHAINS];
- struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
+ /* Updated from RX path only, no locking requirements */
+ struct {
+ unsigned long packets;
+ u64 bytes;
+ unsigned long last_rx;
+ unsigned long num_duplicates;
+ unsigned long fragments;
+ unsigned long dropped;
+ int last_signal;
+ struct ewma_signal avg_signal;
+ u8 chains;
+ s8 chain_signal_last[IEEE80211_MAX_CHAINS];
+ struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
+ int last_rate_idx;
+ u32 last_rate_flag;
+ u32 last_rate_vht_flag;
+ u8 last_rate_vht_nss;
+ u64 msdu[IEEE80211_NUM_TIDS + 1];
+ } rx_stats;
/* Plus 1 for non-QoS frames */
__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
/* Updated from TX status path only, no locking requirements */
- unsigned long tx_filtered_count;
- unsigned long tx_retry_failed, tx_retry_count;
+ struct {
+ unsigned long filtered;
+ unsigned long retry_failed, retry_count;
+ unsigned int lost_packets;
+ unsigned long last_tdls_pkt_time;
+ u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
+ u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
+ } status_stats;
/* Updated from TX path only, no locking requirements */
- u64 tx_packets[IEEE80211_NUM_ACS];
- u64 tx_bytes[IEEE80211_NUM_ACS];
- struct ieee80211_tx_rate last_tx_rate;
- int last_rx_rate_idx;
- u32 last_rx_rate_flag;
- u32 last_rx_rate_vht_flag;
- u8 last_rx_rate_vht_nss;
+ struct {
+ u64 packets[IEEE80211_NUM_ACS];
+ u64 bytes[IEEE80211_NUM_ACS];
+ struct ieee80211_tx_rate last_rate;
+ u64 msdu[IEEE80211_NUM_TIDS + 1];
+ } tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
- u64 tx_msdu[IEEE80211_NUM_TIDS + 1];
- u64 tx_msdu_retries[IEEE80211_NUM_TIDS + 1];
- u64 tx_msdu_failed[IEEE80211_NUM_TIDS + 1];
- u64 rx_msdu[IEEE80211_NUM_TIDS + 1];
/*
* Aggregation information, locked with lock.
@@ -509,15 +486,9 @@
enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
- unsigned int lost_packets;
- unsigned int beacon_loss_count;
-
enum ieee80211_smps_mode known_smps_mode;
const struct ieee80211_cipher_scheme *cipher_scheme;
- /* TDLS timeout data */
- unsigned long last_tdls_pkt_time;
-
u8 reserved_tid;
struct cfg80211_chan_def tdls_chandef;
@@ -688,8 +659,6 @@
void sta_set_rate_info_tx(struct sta_info *sta,
const struct ieee80211_tx_rate *rate,
struct rate_info *rinfo);
-void sta_set_rate_info_rx(struct sta_info *sta,
- struct rate_info *rinfo);
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo);
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9169ccc..5bad05e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -67,7 +67,7 @@
IEEE80211_TX_INTFL_RETRANSMISSION;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
- sta->tx_filtered_count++;
+ sta->status_stats.filtered++;
/*
* Clear more-data bit on filtered frames, it might be set
@@ -183,7 +183,7 @@
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -557,8 +557,9 @@
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- sta->lost_packets++;
- if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD)
+ sta->status_stats.lost_packets++;
+ if (!sta->sta.tdls &&
+ sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
return;
/*
@@ -568,14 +569,15 @@
* mechanism.
*/
if (sta->sta.tdls &&
- (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
+ (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
time_before(jiffies,
- sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME)))
+ sta->status_stats.last_tdls_pkt_time +
+ STA_LOST_TDLS_PKT_TIME)))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
- sta->lost_packets, GFP_ATOMIC);
- sta->lost_packets = 0;
+ sta->status_stats.lost_packets, GFP_ATOMIC);
+ sta->status_stats.lost_packets = 0;
}
static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
@@ -636,18 +638,18 @@
sta = container_of(pubsta, struct sta_info, sta);
if (!acked)
- sta->tx_retry_failed++;
- sta->tx_retry_count += retry_count;
+ sta->status_stats.retry_failed++;
+ sta->status_stats.retry_count += retry_count;
if (acked) {
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
- if (sta->lost_packets)
- sta->lost_packets = 0;
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_tdls_pkt_time = jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
@@ -784,7 +786,8 @@
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))
- sta->last_tx_rate = info->status.rates[rates_idx];
+ sta->tx_stats.last_rate =
+ info->status.rates[rates_idx];
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
@@ -830,13 +833,15 @@
return;
} else {
if (!acked)
- sta->tx_retry_failed++;
- sta->tx_retry_count += retry_count;
+ sta->status_stats.retry_failed++;
+ sta->status_stats.retry_count += retry_count;
if (ieee80211_is_data_present(fc)) {
if (!acked)
- sta->tx_msdu_failed[tid]++;
- sta->tx_msdu_retries[tid] += retry_count;
+ sta->status_stats.msdu_failed[tid]++;
+
+ sta->status_stats.msdu_retries[tid] +=
+ retry_count;
}
}
@@ -854,19 +859,17 @@
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
- if (sta->lost_packets)
- sta->lost_packets = 0;
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_tdls_pkt_time =
+ jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
}
-
- if (acked)
- sta->last_ack_signal = info->status.ack_signal;
}
rcu_read_unlock();
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 314e3bd7..5cf8f4e 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -325,7 +325,6 @@
__field(u32, flags)
__field(int, power_level)
__field(int, dynamic_ps_timeout)
- __field(int, max_sleep_period)
__field(u16, listen_interval)
__field(u8, long_frame_max_tx_count)
__field(u8, short_frame_max_tx_count)
@@ -339,7 +338,6 @@
__entry->flags = local->hw.conf.flags;
__entry->power_level = local->hw.conf.power_level;
__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
- __entry->max_sleep_period = local->hw.conf.max_sleep_period;
__entry->listen_interval = local->hw.conf.listen_interval;
__entry->long_frame_max_tx_count =
local->hw.conf.long_frame_max_tx_count;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3478a83..bdc224d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -757,9 +757,9 @@
if (txrc.reported_rate.idx < 0) {
txrc.reported_rate = tx->rate;
if (tx->sta && ieee80211_is_data(hdr->frame_control))
- tx->sta->last_tx_rate = txrc.reported_rate;
+ tx->sta->tx_stats.last_rate = txrc.reported_rate;
} else if (tx->sta)
- tx->sta->last_tx_rate = txrc.reported_rate;
+ tx->sta->tx_stats.last_rate = txrc.reported_rate;
if (ratetbl)
return TX_CONTINUE;
@@ -824,7 +824,7 @@
hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
tx->sdata->sequence_number += 0x10;
if (tx->sta)
- tx->sta->tx_msdu[IEEE80211_NUM_TIDS]++;
+ tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
return TX_CONTINUE;
}
@@ -840,7 +840,7 @@
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- tx->sta->tx_msdu[tid]++;
+ tx->sta->tx_stats.msdu[tid]++;
if (!tx->sta->sta.txq[0])
hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
@@ -994,10 +994,10 @@
skb_queue_walk(&tx->skbs, skb) {
ac = skb_get_queue_mapping(skb);
- tx->sta->tx_bytes[ac] += skb->len;
+ tx->sta->tx_stats.bytes[ac] += skb->len;
}
if (ac >= 0)
- tx->sta->tx_packets[ac]++;
+ tx->sta->tx_stats.packets[ac]++;
return TX_CONTINUE;
}
@@ -2779,10 +2779,10 @@
}
if (skb_shinfo(skb)->gso_size)
- sta->tx_msdu[tid] +=
+ sta->tx_stats.msdu[tid] +=
DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
else
- sta->tx_msdu[tid]++;
+ sta->tx_stats.msdu[tid]++;
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
@@ -2813,8 +2813,8 @@
/* statistics normally done by ieee80211_tx_h_stats (but that
* has to consider fragmentation, so is more complex)
*/
- sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_packets[skb_get_queue_mapping(skb)]++;
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
if (fast_tx->pn_offs) {
u64 pn;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 60c4dbf..8274c86 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1951,7 +1951,7 @@
}
}
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
/*
* The sta might be in psm against the ap (e.g. because
@@ -2042,9 +2042,13 @@
if (sched_scan_sdata && sched_scan_req)
/*
* Sched scan stopped, but we don't want to report it. Instead,
- * we're trying to reschedule.
+ * we're trying to reschedule. However, if more than one scan
+ * plan was set, we cannot reschedule since we don't know which
+ * scan plan was currently running (and some scan plans may have
+ * already finished).
*/
- if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+ if (sched_scan_req->n_scan_plans > 1 ||
+ __ieee80211_request_sched_scan_start(sched_scan_sdata,
sched_scan_req))
sched_scan_stopped = true;
mutex_unlock(&local->mtx);
@@ -3301,9 +3305,11 @@
if (sta) {
txqi->txq.sta = &sta->sta;
sta->sta.txq[tid] = &txqi->txq;
+ txqi->txq.tid = tid;
txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
} else {
sdata->vif.txq = &txqi->txq;
+ txqi->txq.tid = 0;
txqi->txq.ac = IEEE80211_AC_BE;
}
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index feb547d..d824c38 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -174,9 +174,12 @@
* a driver that supports HW encryption. Send up the key idx only if
* the key is set.
*/
- mac80211_ev_michael_mic_failure(rx->sdata,
- rx->key ? rx->key->conf.keyidx : -1,
- (void *) skb->data, NULL, GFP_ATOMIC);
+ cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2,
+ is_multicast_ether_addr(hdr->addr1) ?
+ NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE,
+ rx->key ? rx->key->conf.keyidx : -1,
+ NULL, GFP_ATOMIC);
return RX_DROP_UNUSABLE;
}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index bb185a2..c70d750 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -19,36 +19,13 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif
+#include <net/nexthop.h>
#include "internal.h"
-#define LABEL_NOT_SPECIFIED (1<<20)
-#define MAX_NEW_LABELS 2
-
-/* This maximum ha length copied from the definition of struct neighbour */
-#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
-
-enum mpls_payload_type {
- MPT_UNSPEC, /* IPv4 or IPv6 */
- MPT_IPV4 = 4,
- MPT_IPV6 = 6,
-
- /* Other types not implemented:
- * - Pseudo-wire with or without control word (RFC4385)
- * - GAL (RFC5586)
- */
-};
-
-struct mpls_route { /* next hop label forwarding entry */
- struct net_device __rcu *rt_dev;
- struct rcu_head rt_rcu;
- u32 rt_label[MAX_NEW_LABELS];
- u8 rt_protocol; /* routing protocol that set this entry */
- u8 rt_payload_type;
- u8 rt_labels;
- u8 rt_via_alen;
- u8 rt_via_table;
- u8 rt_via[0];
-};
+/* Maximum number of labels to look ahead at when selecting a path of
+ * a multipath route
+ */
+#define MAX_MP_SELECT_LABELS 4
static int zero = 0;
static int label_limit = (1 << 20) - 1;
@@ -80,10 +57,24 @@
}
EXPORT_SYMBOL_GPL(mpls_output_possible);
-static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
+{
+ u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
+ int nh_index = nh - rt->rt_nh;
+
+ return nh0_via + rt->rt_max_alen * nh_index;
+}
+
+static const u8 *mpls_nh_via(const struct mpls_route *rt,
+ const struct mpls_nh *nh)
+{
+ return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
+}
+
+static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
/* The size of the layer 2.5 labels to be added for this route */
- return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+ return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}
unsigned int mpls_dev_mtu(const struct net_device *dev)
@@ -105,6 +96,80 @@
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
+static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+ struct sk_buff *skb, bool bos)
+{
+ struct mpls_entry_decoded dec;
+ struct mpls_shim_hdr *hdr;
+ bool eli_seen = false;
+ int label_index;
+ int nh_index = 0;
+ u32 hash = 0;
+
+ /* No need to look further into packet if there's only
+ * one path
+ */
+ if (rt->rt_nhn == 1)
+ goto out;
+
+ for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+ label_index++) {
+ if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+ break;
+
+ /* Read and decode the current label */
+ hdr = mpls_hdr(skb) + label_index;
+ dec = mpls_entry_decode(hdr);
+
+ /* RFC6790 - reserved labels MUST NOT be used as keys
+ * for the load-balancing function
+ */
+ if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
+ hash = jhash_1word(dec.label, hash);
+
+ /* The entropy label follows the entropy label
+ * indicator, so this means that the entropy
+ * label was just added to the hash - no need to
+ * go any deeper either in the label stack or in the
+ * payload
+ */
+ if (eli_seen)
+ break;
+ } else if (dec.label == MPLS_LABEL_ENTROPY) {
+ eli_seen = true;
+ }
+
+ bos = dec.bos;
+ if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
+ sizeof(struct iphdr))) {
+ const struct iphdr *v4hdr;
+
+ v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
+ label_index);
+ if (v4hdr->version == 4) {
+ hash = jhash_3words(ntohl(v4hdr->saddr),
+ ntohl(v4hdr->daddr),
+ v4hdr->protocol, hash);
+ } else if (v4hdr->version == 6 &&
+ pskb_may_pull(skb, sizeof(*hdr) * label_index +
+ sizeof(struct ipv6hdr))) {
+ const struct ipv6hdr *v6hdr;
+
+ v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
+ label_index);
+
+ hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
+ hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
+ hash = jhash_1word(v6hdr->nexthdr, hash);
+ }
+ }
+ }
+
+ nh_index = hash % rt->rt_nhn;
+out:
+ return &rt->rt_nh[nh_index];
+}
+
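All packets of a flow carry the same label stack (and the same inner addresses), so they hash identically and stick to one next hop; reserved labels (0-15) are excluded per RFC 6790, and an entropy label terminates the walk since it already encodes the flow. A userspace model of the selection, with a stand-in mixer where the kernel uses jhash_1word() (illustrative only):

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t mix(uint32_t v, uint32_t seed)
    {
        /* Any decent 32-bit mixer works for the model. */
        return (v ^ seed) * 0x9e3779b1u;
    }

    static size_t select_path(const uint32_t *labels, size_t n_labels,
                              size_t n_paths)
    {
        uint32_t hash = 0;
        size_t i;

        for (i = 0; i < n_labels; i++)
            if (labels[i] >= 16)        /* skip reserved labels 0..15 */
                hash = mix(labels[i], hash);

        return hash % n_paths;          /* index into rt->rt_nh[] */
    }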
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
struct mpls_entry_decoded dec)
{
@@ -159,6 +224,7 @@
struct net *net = dev_net(dev);
struct mpls_shim_hdr *hdr;
struct mpls_route *rt;
+ struct mpls_nh *nh;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
struct mpls_dev *mdev;
@@ -196,8 +262,12 @@
if (!rt)
goto drop;
+ nh = mpls_select_multipath(rt, skb, dec.bos);
+ if (!nh)
+ goto drop;
+
/* Find the output device */
- out_dev = rcu_dereference(rt->rt_dev);
+ out_dev = rcu_dereference(nh->nh_dev);
if (!mpls_output_possible(out_dev))
goto drop;
@@ -212,7 +282,7 @@
dec.ttl -= 1;
/* Verify the destination can hold the packet */
- new_header_size = mpls_rt_header_size(rt);
+ new_header_size = mpls_nh_header_size(nh);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
goto drop;
@@ -240,13 +310,14 @@
/* Push the new labels */
hdr = mpls_hdr(skb);
bos = dec.bos;
- for (i = rt->rt_labels - 1; i >= 0; i--) {
- hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+ for (i = nh->nh_labels - 1; i >= 0; i--) {
+ hdr[i] = mpls_entry_encode(nh->nh_label[i],
+ dec.ttl, 0, bos);
bos = false;
}
}
- err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+ err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb);
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
@@ -270,24 +341,33 @@
struct mpls_route_config {
u32 rc_protocol;
u32 rc_ifindex;
- u16 rc_via_table;
- u16 rc_via_alen;
+ u8 rc_via_table;
+ u8 rc_via_alen;
u8 rc_via[MAX_VIA_ALEN];
u32 rc_label;
- u32 rc_output_labels;
+ u8 rc_output_labels;
u32 rc_output_label[MAX_NEW_LABELS];
u32 rc_nlflags;
enum mpls_payload_type rc_payload_type;
struct nl_info rc_nlinfo;
+ struct rtnexthop *rc_mp;
+ int rc_mp_len;
};
-static struct mpls_route *mpls_rt_alloc(size_t alen)
+static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
{
+ u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
struct mpls_route *rt;
- rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
- if (rt)
- rt->rt_via_alen = alen;
+ rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
+ VIA_ALEN_ALIGN) +
+ num_nh * max_alen_aligned,
+ GFP_KERNEL);
+ if (rt) {
+ rt->rt_nhn = num_nh;
+ rt->rt_max_alen = max_alen_aligned;
+ }
+
return rt;
}
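As a worked example of the allocation above (the sizes are illustrative
assumptions, not values from the patch): with num_nh = 2, max_alen = 6 (an
Ethernet address) and VIA_ALEN_ALIGN = 8, max_alen_aligned = ALIGN(6, 8) = 8,
so kzalloc() receives ALIGN(sizeof(*rt) + 2 * sizeof(*rt->rt_nh), 8) + 2 * 8
bytes: the route header and both next hops, padded to an 8-byte boundary,
followed by one 8-byte via slot per next hop. This matches the memory-layout
diagram added to net/mpls/internal.h further down.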
@@ -312,25 +392,22 @@
}
static void mpls_route_update(struct net *net, unsigned index,
- struct net_device *dev, struct mpls_route *new,
+ struct mpls_route *new,
const struct nl_info *info)
{
struct mpls_route __rcu **platform_label;
- struct mpls_route *rt, *old = NULL;
+ struct mpls_route *rt;
ASSERT_RTNL();
platform_label = rtnl_dereference(net->mpls.platform_label);
rt = rtnl_dereference(platform_label[index]);
- if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
- rcu_assign_pointer(platform_label[index], new);
- old = rt;
- }
+ rcu_assign_pointer(platform_label[index], new);
- mpls_notify_route(net, index, old, new, info);
+ mpls_notify_route(net, index, rt, new, info);
/* If we removed a route free it now */
- mpls_rt_free(old);
+ mpls_rt_free(rt);
}
static unsigned find_free_label(struct net *net)
@@ -350,7 +427,8 @@
}
#if IS_ENABLED(CONFIG_INET)
-static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet_fib_lookup_dev(struct net *net,
+ const void *addr)
{
struct net_device *dev;
struct rtable *rt;
@@ -369,14 +447,16 @@
return dev;
}
#else
-static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet_fib_lookup_dev(struct net *net,
+ const void *addr)
{
return ERR_PTR(-EAFNOSUPPORT);
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
-static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet6_fib_lookup_dev(struct net *net,
+ const void *addr)
{
struct net_device *dev;
struct dst_entry *dst;
@@ -399,47 +479,234 @@
return dev;
}
#else
-static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet6_fib_lookup_dev(struct net *net,
+ const void *addr)
{
return ERR_PTR(-EAFNOSUPPORT);
}
#endif
static struct net_device *find_outdev(struct net *net,
- struct mpls_route_config *cfg)
+ struct mpls_route *rt,
+ struct mpls_nh *nh, int oif)
{
struct net_device *dev = NULL;
- if (!cfg->rc_ifindex) {
- switch (cfg->rc_via_table) {
+ if (!oif) {
+ switch (nh->nh_via_table) {
case NEIGH_ARP_TABLE:
- dev = inet_fib_lookup_dev(net, cfg->rc_via);
+ dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
break;
case NEIGH_ND_TABLE:
- dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+ dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
break;
case NEIGH_LINK_TABLE:
break;
}
} else {
- dev = dev_get_by_index(net, cfg->rc_ifindex);
+ dev = dev_get_by_index(net, oif);
}
if (!dev)
return ERR_PTR(-ENODEV);
+ /* The caller is holding rtnl anyway, so release the dev reference */
+ dev_put(dev);
+
return dev;
}
+static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
+ struct mpls_nh *nh, int oif)
+{
+ struct net_device *dev = NULL;
+ int err = -ENODEV;
+
+ dev = find_outdev(net, rt, nh, oif);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ dev = NULL;
+ goto errout;
+ }
+
+ /* Ensure this is a supported device */
+ err = -EINVAL;
+ if (!mpls_dev_get(dev))
+ goto errout;
+
+ RCU_INIT_POINTER(nh->nh_dev, dev);
+
+ return 0;
+
+errout:
+ return err;
+}
+
+static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
+ struct mpls_route *rt)
+{
+ struct net *net = cfg->rc_nlinfo.nl_net;
+ struct mpls_nh *nh = rt->rt_nh;
+ int err;
+ int i;
+
+ if (!nh)
+ return -ENOMEM;
+
+ err = -EINVAL;
+ /* Ensure only a supported number of labels are present */
+ if (cfg->rc_output_labels > MAX_NEW_LABELS)
+ goto errout;
+
+ nh->nh_labels = cfg->rc_output_labels;
+ for (i = 0; i < nh->nh_labels; i++)
+ nh->nh_label[i] = cfg->rc_output_label[i];
+
+ nh->nh_via_table = cfg->rc_via_table;
+ memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
+ nh->nh_via_alen = cfg->rc_via_alen;
+
+ err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
+ if (err)
+ goto errout;
+
+ return 0;
+
+errout:
+ return err;
+}
+
+static int mpls_nh_build(struct net *net, struct mpls_route *rt,
+ struct mpls_nh *nh, int oif,
+ struct nlattr *via, struct nlattr *newdst)
+{
+ int err = -ENOMEM;
+
+ if (!nh)
+ goto errout;
+
+ if (newdst) {
+ err = nla_get_labels(newdst, MAX_NEW_LABELS,
+ &nh->nh_labels, nh->nh_label);
+ if (err)
+ goto errout;
+ }
+
+ err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
+ __mpls_nh_via(rt, nh));
+ if (err)
+ goto errout;
+
+ err = mpls_nh_assign_dev(net, rt, nh, oif);
+ if (err)
+ goto errout;
+
+ return 0;
+
+errout:
+ return err;
+}
+
+static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
+ u8 cfg_via_alen, u8 *max_via_alen)
+{
+ int nhs = 0;
+ int remaining = len;
+
+ if (!rtnh) {
+ *max_via_alen = cfg_via_alen;
+ return 1;
+ }
+
+ *max_via_alen = 0;
+
+ while (rtnh_ok(rtnh, remaining)) {
+ struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+ int attrlen;
+
+ attrlen = rtnh_attrlen(rtnh);
+ nla = nla_find(attrs, attrlen, RTA_VIA);
+ if (nla && nla_len(nla) >=
+ offsetof(struct rtvia, rtvia_addr)) {
+ int via_alen = nla_len(nla) -
+ offsetof(struct rtvia, rtvia_addr);
+
+ if (via_alen <= MAX_VIA_ALEN)
+ *max_via_alen = max_t(u16, *max_via_alen,
+ via_alen);
+ }
+
+ nhs++;
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ /* leftover implies invalid nexthop configuration, discard it */
+ return remaining > 0 ? 0 : nhs;
+}
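Each rtnexthop counted here corresponds to one "nexthop" clause in the
netlink request. If the userspace side follows the usual iproute2
conventions, a matching configuration request might look like this (a
hypothetical command line, shown only to illustrate the RTA_MULTIPATH payload
being walked):

    ip -f mpls route add 100 nexthop via inet 10.1.1.2 dev eth0 \
                             nexthop via inet 10.1.2.2 dev eth1

Every clause becomes a struct rtnexthop whose attributes carry RTA_VIA
(mandatory, as enforced in mpls_nh_build_multi() below) and optionally
RTA_NEWDST.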
+
+static int mpls_nh_build_multi(struct mpls_route_config *cfg,
+ struct mpls_route *rt)
+{
+ struct rtnexthop *rtnh = cfg->rc_mp;
+ struct nlattr *nla_via, *nla_newdst;
+ int remaining = cfg->rc_mp_len;
+ int nhs = 0;
+ int err = 0;
+
+ change_nexthops(rt) {
+ int attrlen;
+
+ nla_via = NULL;
+ nla_newdst = NULL;
+
+ err = -EINVAL;
+ if (!rtnh_ok(rtnh, remaining))
+ goto errout;
+
+ /* neither weighted multipath nor any flags
+ * are supported
+ */
+ if (rtnh->rtnh_hops || rtnh->rtnh_flags)
+ goto errout;
+
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ struct nlattr *attrs = rtnh_attrs(rtnh);
+
+ nla_via = nla_find(attrs, attrlen, RTA_VIA);
+ nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
+ }
+
+ if (!nla_via)
+ goto errout;
+
+ err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
+ rtnh->rtnh_ifindex, nla_via,
+ nla_newdst);
+ if (err)
+ goto errout;
+
+ rtnh = rtnh_next(rtnh, &remaining);
+ nhs++;
+ } endfor_nexthops(rt);
+
+ rt->rt_nhn = nhs;
+
+ return 0;
+
+errout:
+ return err;
+}
+
static int mpls_route_add(struct mpls_route_config *cfg)
{
struct mpls_route __rcu **platform_label;
struct net *net = cfg->rc_nlinfo.nl_net;
- struct net_device *dev = NULL;
struct mpls_route *rt, *old;
- unsigned index;
- int i;
int err = -EINVAL;
+ u8 max_via_alen;
+ unsigned index;
+ int nhs;
index = cfg->rc_label;
@@ -457,27 +724,6 @@
if (index >= net->mpls.platform_labels)
goto errout;
- /* Ensure only a supported number of labels are present */
- if (cfg->rc_output_labels > MAX_NEW_LABELS)
- goto errout;
-
- dev = find_outdev(net, cfg);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
- dev = NULL;
- goto errout;
- }
-
- /* Ensure this is a supported device */
- err = -EINVAL;
- if (!mpls_dev_get(dev))
- goto errout;
-
- err = -EINVAL;
- if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
- (dev->addr_len != cfg->rc_via_alen))
- goto errout;
-
/* Append makes no sense with mpls */
err = -EOPNOTSUPP;
if (cfg->rc_nlflags & NLM_F_APPEND)
@@ -497,28 +743,34 @@
if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
goto errout;
+ err = -EINVAL;
+ nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
+ cfg->rc_via_alen, &max_via_alen);
+ if (nhs == 0)
+ goto errout;
+
err = -ENOMEM;
- rt = mpls_rt_alloc(cfg->rc_via_alen);
+ rt = mpls_rt_alloc(nhs, max_via_alen);
if (!rt)
goto errout;
- rt->rt_labels = cfg->rc_output_labels;
- for (i = 0; i < rt->rt_labels; i++)
- rt->rt_label[i] = cfg->rc_output_label[i];
rt->rt_protocol = cfg->rc_protocol;
- RCU_INIT_POINTER(rt->rt_dev, dev);
rt->rt_payload_type = cfg->rc_payload_type;
- rt->rt_via_table = cfg->rc_via_table;
- memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
- mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+ if (cfg->rc_mp)
+ err = mpls_nh_build_multi(cfg, rt);
+ else
+ err = mpls_nh_build_from_cfg(cfg, rt);
+ if (err)
+ goto freert;
- dev_put(dev);
+ mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
+
return 0;
+freert:
+ mpls_rt_free(rt);
errout:
- if (dev)
- dev_put(dev);
return err;
}
@@ -538,7 +790,7 @@
if (index >= net->mpls.platform_labels)
goto errout;
- mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+ mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
err = 0;
errout:
@@ -635,9 +887,11 @@
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
if (!rt)
continue;
- if (rtnl_dereference(rt->rt_dev) != dev)
- continue;
- rt->rt_dev = NULL;
+ for_nexthops(rt) {
+ if (rtnl_dereference(nh->nh_dev) != dev)
+ continue;
+ nh->nh_dev = NULL;
+ } endfor_nexthops(rt);
}
mdev = mpls_dev_get(dev);
@@ -736,7 +990,7 @@
EXPORT_SYMBOL_GPL(nla_put_labels);
int nla_get_labels(const struct nlattr *nla,
- u32 max_labels, u32 *labels, u32 label[])
+ u32 max_labels, u8 *labels, u32 label[])
{
unsigned len = nla_len(nla);
unsigned nla_labels;
@@ -781,6 +1035,48 @@
}
EXPORT_SYMBOL_GPL(nla_get_labels);
+int nla_get_via(const struct nlattr *nla, u8 *via_alen,
+ u8 *via_table, u8 via_addr[])
+{
+ struct rtvia *via = nla_data(nla);
+ int err = -EINVAL;
+ int alen;
+
+ if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+ goto errout;
+ alen = nla_len(nla) -
+ offsetof(struct rtvia, rtvia_addr);
+ if (alen > MAX_VIA_ALEN)
+ goto errout;
+
+ /* Validate the address family */
+ switch (via->rtvia_family) {
+ case AF_PACKET:
+ *via_table = NEIGH_LINK_TABLE;
+ break;
+ case AF_INET:
+ *via_table = NEIGH_ARP_TABLE;
+ if (alen != 4)
+ goto errout;
+ break;
+ case AF_INET6:
+ *via_table = NEIGH_ND_TABLE;
+ if (alen != 16)
+ goto errout;
+ break;
+ default:
+ /* Unsupported address family */
+ goto errout;
+ }
+
+ memcpy(via_addr, via->rtvia_addr, alen);
+ *via_alen = alen;
+ err = 0;
+
+errout:
+ return err;
+}
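For orientation: the RTA_VIA payload parsed here is a struct rtvia, a
two-byte rtvia_family followed by the raw address bytes, so
offsetof(struct rtvia, rtvia_addr) is 2 and a well-formed IPv4 via arrives
with nla_len(nla) == 2 + 4 == 6. This simply restates the arithmetic the
function performs.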
+
static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
struct mpls_route_config *cfg)
{
@@ -844,7 +1140,7 @@
break;
case RTA_DST:
{
- u32 label_count;
+ u8 label_count;
if (nla_get_labels(nla, 1, &label_count,
&cfg->rc_label))
goto errout;
@@ -857,35 +1153,15 @@
}
case RTA_VIA:
{
- struct rtvia *via = nla_data(nla);
- if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+ if (nla_get_via(nla, &cfg->rc_via_alen,
+ &cfg->rc_via_table, cfg->rc_via))
goto errout;
- cfg->rc_via_alen = nla_len(nla) -
- offsetof(struct rtvia, rtvia_addr);
- if (cfg->rc_via_alen > MAX_VIA_ALEN)
- goto errout;
-
- /* Validate the address family */
- switch(via->rtvia_family) {
- case AF_PACKET:
- cfg->rc_via_table = NEIGH_LINK_TABLE;
- break;
- case AF_INET:
- cfg->rc_via_table = NEIGH_ARP_TABLE;
- if (cfg->rc_via_alen != 4)
- goto errout;
- break;
- case AF_INET6:
- cfg->rc_via_table = NEIGH_ND_TABLE;
- if (cfg->rc_via_alen != 16)
- goto errout;
- break;
- default:
- /* Unsupported address family */
- goto errout;
- }
-
- memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+ break;
+ }
+ case RTA_MULTIPATH:
+ {
+ cfg->rc_mp = nla_data(nla);
+ cfg->rc_mp_len = nla_len(nla);
break;
}
default:
@@ -946,16 +1222,52 @@
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
- if (rt->rt_labels &&
- nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
- goto nla_put_failure;
- if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
- goto nla_put_failure;
- dev = rtnl_dereference(rt->rt_dev);
- if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
- goto nla_put_failure;
if (nla_put_labels(skb, RTA_DST, 1, &label))
goto nla_put_failure;
+ if (rt->rt_nhn == 1) {
+ const struct mpls_nh *nh = rt->rt_nh;
+
+ if (nh->nh_labels &&
+ nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
+ nh->nh_label))
+ goto nla_put_failure;
+ if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
+ nh->nh_via_alen))
+ goto nla_put_failure;
+ dev = rtnl_dereference(nh->nh_dev);
+ if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+ goto nla_put_failure;
+ } else {
+ struct rtnexthop *rtnh;
+ struct nlattr *mp;
+
+ mp = nla_nest_start(skb, RTA_MULTIPATH);
+ if (!mp)
+ goto nla_put_failure;
+
+ for_nexthops(rt) {
+ rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+ if (!rtnh)
+ goto nla_put_failure;
+
+ dev = rtnl_dereference(nh->nh_dev);
+ if (dev)
+ rtnh->rtnh_ifindex = dev->ifindex;
+ if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
+ nh->nh_labels,
+ nh->nh_label))
+ goto nla_put_failure;
+ if (nla_put_via(skb, nh->nh_via_table,
+ mpls_nh_via(rt, nh),
+ nh->nh_via_alen))
+ goto nla_put_failure;
+
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+ } endfor_nexthops(rt);
+
+ nla_nest_end(skb, mp);
+ }
nlmsg_end(skb, nlh);
return 0;
@@ -1000,12 +1312,30 @@
{
size_t payload =
NLMSG_ALIGN(sizeof(struct rtmsg))
- + nla_total_size(2 + rt->rt_via_alen) /* RTA_VIA */
+ nla_total_size(4); /* RTA_DST */
- if (rt->rt_labels) /* RTA_NEWDST */
- payload += nla_total_size(rt->rt_labels * 4);
- if (rt->rt_dev) /* RTA_OIF */
- payload += nla_total_size(4);
+
+ if (rt->rt_nhn == 1) {
+ struct mpls_nh *nh = rt->rt_nh;
+
+ if (nh->nh_dev)
+ payload += nla_total_size(4); /* RTA_OIF */
+ payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */
+ if (nh->nh_labels) /* RTA_NEWDST */
+ payload += nla_total_size(nh->nh_labels * 4);
+ } else {
+ /* each nexthop is packed in an attribute */
+ size_t nhsize = 0;
+
+ for_nexthops(rt) {
+ nhsize += nla_total_size(sizeof(struct rtnexthop));
+ nhsize += nla_total_size(2 + nh->nh_via_alen);
+ if (nh->nh_labels)
+ nhsize += nla_total_size(nh->nh_labels * 4);
+ } endfor_nexthops(rt);
+ /* nested attribute */
+ payload += nla_total_size(nhsize);
+ }
+
return payload;
}
@@ -1057,25 +1387,29 @@
/* In case the predefined labels need to be populated */
if (limit > MPLS_LABEL_IPV4NULL) {
struct net_device *lo = net->loopback_dev;
- rt0 = mpls_rt_alloc(lo->addr_len);
+ rt0 = mpls_rt_alloc(1, lo->addr_len);
if (!rt0)
goto nort0;
- RCU_INIT_POINTER(rt0->rt_dev, lo);
+ RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
- rt0->rt_via_table = NEIGH_LINK_TABLE;
- memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+ rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+ rt0->rt_nh->nh_via_alen = lo->addr_len;
+ memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
+ lo->addr_len);
}
if (limit > MPLS_LABEL_IPV6NULL) {
struct net_device *lo = net->loopback_dev;
- rt2 = mpls_rt_alloc(lo->addr_len);
+ rt2 = mpls_rt_alloc(1, lo->addr_len);
if (!rt2)
goto nort2;
- RCU_INIT_POINTER(rt2->rt_dev, lo);
+ RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
- rt2->rt_via_table = NEIGH_LINK_TABLE;
- memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+ rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+ rt2->rt_nh->nh_via_alen = lo->addr_len;
+ memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
+ lo->addr_len);
}
rtnl_lock();
@@ -1085,7 +1419,7 @@
/* Free any labels beyond the new table */
for (index = limit; index < old_limit; index++)
- mpls_route_update(net, index, NULL, NULL, NULL);
+ mpls_route_update(net, index, NULL, NULL);
/* Copy over the old labels */
cp_size = size;
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 2681a4b..bde52ce 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -21,6 +21,76 @@
struct sk_buff;
+#define LABEL_NOT_SPECIFIED (1 << 20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum hardware address (ha) length is copied from the definition of struct neighbour */
+#define VIA_ALEN_ALIGN sizeof(unsigned long)
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))
+
+enum mpls_payload_type {
+ MPT_UNSPEC, /* IPv4 or IPv6 */
+ MPT_IPV4 = 4,
+ MPT_IPV6 = 6,
+
+ /* Other types not implemented:
+ * - Pseudo-wire with or without control word (RFC4385)
+ * - GAL (RFC5586)
+ */
+};
+
+struct mpls_nh { /* next hop label forwarding entry */
+ struct net_device __rcu *nh_dev;
+ u32 nh_label[MAX_NEW_LABELS];
+ u8 nh_labels;
+ u8 nh_via_alen;
+ u8 nh_via_table;
+};
+
+/* The route, nexthops and vias are stored together in the same memory
+ * block:
+ *
+ * +----------------------+
+ * | mpls_route |
+ * +----------------------+
+ * | mpls_nh 0 |
+ * +----------------------+
+ * | ... |
+ * +----------------------+
+ * | mpls_nh n-1 |
+ * +----------------------+
+ * | alignment padding |
+ * +----------------------+
+ * | via[rt_max_alen] 0 |
+ * +----------------------+
+ * | ... |
+ * +----------------------+
+ * | via[rt_max_alen] n-1 |
+ * +----------------------+
+ */
+struct mpls_route { /* next hop label forwarding entry */
+ struct rcu_head rt_rcu;
+ u8 rt_protocol;
+ u8 rt_payload_type;
+ u8 rt_max_alen;
+ unsigned int rt_nhn;
+ struct mpls_nh rt_nh[0];
+};
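Given this layout, the via storage for a given next hop can be found with
pointer arithmetic alone. The patch uses helpers named
__mpls_nh_via()/mpls_nh_via() whose definitions fall outside this diff, so
the following is only a sketch of what such a helper could look like,
derived from the diagram above:

    static inline u8 *__mpls_nh_via_sketch(struct mpls_route *rt,
                                           struct mpls_nh *nh)
    {
            /* vias start after the nexthop array, padded to VIA_ALEN_ALIGN */
            u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn],
                                    VIA_ALEN_ALIGN);

            /* each nexthop owns one rt_max_alen-sized slot, in array order */
            return nh0_via + (nh - rt->rt_nh) * rt->rt_max_alen;
    }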
+
+#define for_nexthops(rt) { \
+ int nhsel; struct mpls_nh *nh; \
+ for (nhsel = 0, nh = (rt)->rt_nh; \
+ nhsel < (rt)->rt_nhn; \
+ nh++, nhsel++)
+
+#define change_nexthops(rt) { \
+ int nhsel; struct mpls_nh *nh; \
+ for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh); \
+ nhsel < (rt)->rt_nhn; \
+ nh++, nhsel++)
+
+#define endfor_nexthops(rt) }
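The opening brace hidden in for_nexthops()/change_nexthops() scopes nhsel and
nh, and endfor_nexthops() supplies the matching close, which is why the
macros must always be paired. A typical traversal, as already used in the
af_mpls.c hunks above, looks like this (the pr_info() body is illustrative
only):

    for_nexthops(rt) {
            if (rtnl_dereference(nh->nh_dev) == dev)
                    pr_info("nexthop %d uses %s\n", nhsel, dev->name);
    } endfor_nexthops(rt);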
+
static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
return (struct mpls_shim_hdr *)skb_network_header(skb);
@@ -52,8 +122,10 @@
int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
u32 label[]);
+int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
+ u8 via[]);
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 09e661c..f39276d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -152,6 +152,8 @@
#endif
synchronize_net();
nf_queue_nf_hook_drop(net, &entry->ops);
+ /* another CPU might still be processing an nfqueue verdict that used this hook */
+ synchronize_net();
kfree(entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a1fe537..5a30ce6 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -297,7 +297,7 @@
ip_set_timeout_expired(ext_timeout(n, set))))
n = NULL;
- e = kzalloc(set->dsize, GFP_KERNEL);
+ e = kzalloc(set->dsize, GFP_ATOMIC);
if (!e)
return -ENOMEM;
e->id = d->id;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a49a8c..fafe33b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2371,7 +2371,7 @@
int pos, idx, shift;
err = 0;
- netlink_table_grab();
+ netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
break;
@@ -2386,7 +2386,7 @@
}
if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
err = -EFAULT;
- netlink_table_ungrab();
+ netlink_unlock_table();
break;
}
case NETLINK_CAP_ACK:
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c608723..221fa8b 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -769,7 +769,6 @@
struct sw_flow_key *key, const struct nlattr *attr,
const struct nlattr *actions, int actions_len)
{
- struct ip_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
@@ -797,11 +796,9 @@
if (vport) {
int err;
- upcall.egress_tun_info = &info;
- err = ovs_vport_get_egress_tun_info(vport, skb,
- &upcall);
- if (err)
- upcall.egress_tun_info = NULL;
+ err = dev_fill_metadata_dst(vport->dev, skb);
+ if (!err)
+ upcall.egress_tun_info = skb_tunnel_info(skb);
}
break;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 9ed833e..bd165ee 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -151,6 +151,8 @@
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
state = ovs_ct_get_state(ctinfo);
+ if (!nf_ct_is_confirmed(ct))
+ state |= OVS_CS_F_NEW;
if (ct->master)
state |= OVS_CS_F_RELATED;
zone = nf_ct_zone(ct);
@@ -222,9 +224,6 @@
struct nf_conn *ct;
int err;
- if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
- return -ENOTSUPP;
-
/* The connection could be invalid, in which case set_label is a no-op. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
@@ -377,7 +376,7 @@
return true;
}
-static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
@@ -408,6 +407,8 @@
}
}
+ ovs_ct_update_key(skb, key, true);
+
return 0;
}
@@ -430,8 +431,6 @@
err = __ovs_ct_lookup(net, key, info, skb);
if (err)
return err;
-
- ovs_ct_update_key(skb, key, true);
}
return 0;
@@ -460,8 +459,6 @@
if (nf_conntrack_confirm(skb) != NF_ACCEPT)
return -EINVAL;
- ovs_ct_update_key(skb, key, true);
-
return 0;
}
@@ -587,6 +584,10 @@
case OVS_CT_ATTR_MARK: {
struct md_mark *mark = nla_data(a);
+ if (!mark->mask) {
+ OVS_NLERR(log, "ct_mark mask cannot be 0");
+ return -EINVAL;
+ }
info->mark = *mark;
break;
}
@@ -595,6 +596,10 @@
case OVS_CT_ATTR_LABELS: {
struct md_labels *labels = nla_data(a);
+ if (!labels_nonzero(&labels->mask)) {
+ OVS_NLERR(log, "ct_labels mask cannot be 0");
+ return -EINVAL;
+ }
info->labels = *labels;
break;
}
@@ -705,11 +710,12 @@
if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
return -EMSGSIZE;
- if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
&ct_info->mark))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&ct_info->labels.mask) &&
nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
&ct_info->labels))
return -EMSGSIZE;
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
index da87149..82e0dfc 100644
--- a/net/openvswitch/conntrack.h
+++ b/net/openvswitch/conntrack.h
@@ -35,12 +35,9 @@
int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
void ovs_ct_free_action(const struct nlattr *a);
-static inline bool ovs_ct_state_supported(u32 state)
-{
- return !(state & ~(OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED |
- OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR |
- OVS_CS_F_INVALID | OVS_CS_F_TRACKED));
-}
+#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
+ OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
+ OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
#else
#include <linux/errno.h>
@@ -53,11 +50,6 @@
return false;
}
-static inline bool ovs_ct_state_supported(u32 state)
-{
- return false;
-}
-
static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
const struct sw_flow_key *key,
struct sw_flow_actions **acts, bool log)
@@ -94,5 +86,7 @@
}
static inline void ovs_ct_free_action(const struct nlattr *a) { }
+
+#define CT_SUPPORTED_MASK 0
#endif /* CONFIG_NF_CONNTRACK */
#endif /* ovs_conntrack.h */
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a758280..5633172 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -489,9 +489,8 @@
if (upcall_info->egress_tun_info) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
- err = ovs_nla_put_egress_tunnel_key(user_skb,
- upcall_info->egress_tun_info,
- upcall_info->egress_tun_opts);
+ err = ovs_nla_put_tunnel_info(user_skb,
+ upcall_info->egress_tun_info);
BUG_ON(err);
nla_nest_end(user_skb, nla);
}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index f88038a..67bdecd 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -117,7 +117,6 @@
*/
struct dp_upcall_info {
struct ip_tunnel_info *egress_tun_info;
- const void *egress_tun_opts;
const struct nlattr *userdata;
const struct nlattr *actions;
int actions_len;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 6799c8d..907d6fd 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -548,11 +548,11 @@
struct sw_flow_match *match, bool is_mask,
bool log)
{
+ bool ttl = false, ipv4 = false, ipv6 = false;
+ __be16 tun_flags = 0;
+ int opts_type = 0;
struct nlattr *a;
int rem;
- bool ttl = false;
- __be16 tun_flags = 0, ipv4 = false, ipv6 = false;
- int opts_type = 0;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
@@ -764,7 +764,7 @@
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
- if (tun_opts) {
+ if (swkey_tun_opts_len) {
if (output->tun_flags & TUNNEL_GENEVE_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
@@ -798,14 +798,13 @@
return 0;
}
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
- const struct ip_tunnel_info *egress_tun_info,
- const void *egress_tun_opts)
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
{
- return __ip_tun_to_nlattr(skb, &egress_tun_info->key,
- egress_tun_opts,
- egress_tun_info->options_len,
- ip_tunnel_info_af(egress_tun_info));
+ return __ip_tun_to_nlattr(skb, &tun_info->key,
+ ip_tunnel_info_opts(tun_info),
+ tun_info->options_len,
+ ip_tunnel_info_af(tun_info));
}
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
@@ -866,7 +865,7 @@
ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
- if (!is_mask && !ovs_ct_state_supported(ct_state)) {
+ if (ct_state & ~CT_SUPPORTED_MASK) {
OVS_NLERR(log, "ct_state flags %08x unsupported",
ct_state);
return -EINVAL;
@@ -1149,6 +1148,9 @@
} else {
memset(nla_data(nla), val, nla_len(nla));
}
+
+ if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+ *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
}
}
@@ -2432,11 +2434,7 @@
if (!start)
return -EMSGSIZE;
- err = ip_tun_to_nlattr(skb, &tun_info->key,
- tun_info->options_len ?
- ip_tunnel_info_opts(tun_info) : NULL,
- tun_info->options_len,
- ip_tunnel_info_af(tun_info));
+ err = ovs_nla_put_tunnel_info(skb, tun_info);
if (err)
return err;
nla_nest_end(skb, start);
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 6ca3f0b..47dd142 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -55,9 +55,9 @@
int ovs_nla_get_match(struct net *, struct sw_flow_match *,
const struct nlattr *key, const struct nlattr *mask,
bool log);
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
- const struct ip_tunnel_info *,
- const void *egress_tun_opts);
+
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info);
bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 2735e9c..efb736b 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -52,18 +52,6 @@
return 0;
}
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- struct geneve_port *geneve_port = geneve_vport(vport);
- struct net *net = ovs_dp_get_net(vport->dp);
- __be16 dport = htons(geneve_port->port_no);
- __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
- return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
- skb, IPPROTO_UDP, sport, dport);
-}
-
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
@@ -128,9 +116,8 @@
.create = geneve_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = geneve_get_options,
- .send = ovs_netdev_send,
+ .send = dev_queue_xmit,
.owner = THIS_MODULE,
- .get_egress_tun_info = geneve_get_egress_tun_info,
};
static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 4d24481..c3257d7 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -84,18 +84,10 @@
return ovs_netdev_link(vport, parms->name);
}
-static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
- skb, IPPROTO_GRE, 0, 0);
-}
-
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
- .send = ovs_netdev_send,
- .get_egress_tun_info = gre_get_egress_tun_info,
+ .send = dev_queue_xmit,
.destroy = ovs_netdev_tunnel_destroy,
.owner = THIS_MODULE,
};
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 388b8a6..ec76398 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -106,12 +106,45 @@
free_netdev(dev);
}
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->tx_errors = dev->stats.tx_errors;
+ stats->tx_dropped = dev->stats.tx_dropped;
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *percpu_stats;
+ struct pcpu_sw_netstats local_stats;
+ unsigned int start;
+
+ percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+ local_stats = *percpu_stats;
+ } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+ stats->rx_bytes += local_stats.rx_bytes;
+ stats->rx_packets += local_stats.rx_packets;
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
+ }
+
+ return stats;
+}
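The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair re-reads a
per-CPU snapshot whenever a writer updated it concurrently, so the summed
64-bit counters stay consistent even on 32-bit hosts without taking any
lock; on 64-bit builds the sequence counter compiles away to almost nothing.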
+
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = internal_dev_change_mtu,
+ .ndo_get_stats64 = internal_get_stats,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -161,6 +194,11 @@
err = -ENOMEM;
goto error_free_vport;
}
+ vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!vport->dev->tstats) {
+ err = -ENOMEM;
+ goto error_free_netdev;
+ }
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
internal_dev = internal_dev_priv(vport->dev);
@@ -173,7 +211,7 @@
rtnl_lock();
err = register_netdevice(vport->dev);
if (err)
- goto error_free_netdev;
+ goto error_unlock;
dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
@@ -181,8 +219,10 @@
return vport;
-error_free_netdev:
+error_unlock:
rtnl_unlock();
+ free_percpu(vport->dev->tstats);
+error_free_netdev:
free_netdev(vport->dev);
error_free_vport:
ovs_vport_free(vport);
@@ -198,26 +238,25 @@
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(vport->dev);
-
+ free_percpu(vport->dev->tstats);
rtnl_unlock();
}
-static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
{
- struct net_device *netdev = vport->dev;
+ struct net_device *netdev = skb->dev;
struct pcpu_sw_netstats *stats;
if (unlikely(!(netdev->flags & IFF_UP))) {
kfree_skb(skb);
netdev->stats.rx_dropped++;
- return;
+ return NETDEV_TX_OK;
}
skb_dst_drop(skb);
nf_reset(skb);
secpath_reset(skb);
- skb->dev = netdev;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -229,6 +268,7 @@
u64_stats_update_end(&stats->syncp);
netif_rx(skb);
+ return NETDEV_TX_OK;
}
static struct vport_ops ovs_internal_vport_ops = {
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index f7e8dcc..b327368 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -190,37 +190,6 @@
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
-static unsigned int packet_length(const struct sk_buff *skb)
-{
- unsigned int length = skb->len - ETH_HLEN;
-
- if (skb->protocol == htons(ETH_P_8021Q))
- length -= VLAN_HLEN;
-
- return length;
-}
-
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
-{
- int mtu = vport->dev->mtu;
-
- if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
- net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
- vport->dev->name,
- packet_length(skb), mtu);
- vport->dev->stats.tx_errors++;
- goto drop;
- }
-
- skb->dev = vport->dev;
- dev_queue_xmit(skb);
- return;
-
-drop:
- kfree_skb(skb);
-}
-EXPORT_SYMBOL_GPL(ovs_netdev_send);
-
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
@@ -235,7 +204,7 @@
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
- .send = ovs_netdev_send,
+ .send = dev_queue_xmit,
};
int __init ovs_netdev_init(void)
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index bf22fce..19e29c1 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -27,7 +27,6 @@
struct vport *ovs_netdev_get_vport(struct net_device *dev);
struct vport *ovs_netdev_link(struct vport *vport, const char *name);
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
void ovs_netdev_detach_dev(struct vport *);
int __init ovs_netdev_init(void);
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index fb3cdb8..1605691 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -146,32 +146,12 @@
return ovs_netdev_link(vport, parms->name);
}
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- struct vxlan_dev *vxlan = netdev_priv(vport->dev);
- struct net *net = ovs_dp_get_net(vport->dp);
- unsigned short family = ip_tunnel_info_af(upcall->egress_tun_info);
- __be16 dst_port = vxlan_dev_dst_port(vxlan, family);
- __be16 src_port;
- int port_min;
- int port_max;
-
- inet_get_local_port_range(net, &port_min, &port_max);
- src_port = udp_flow_src_port(net, skb, 0, 0, true);
-
- return ovs_tunnel_get_egress_info(upcall, net,
- skb, IPPROTO_UDP,
- src_port, dst_port);
-}
-
static struct vport_ops ovs_vxlan_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_VXLAN,
.create = vxlan_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = vxlan_get_options,
- .send = ovs_netdev_send,
- .get_egress_tun_info = vxlan_get_egress_tun_info,
+ .send = dev_queue_xmit,
};
static int __init ovs_vxlan_tnl_init(void)
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 12a36ac2..0ac0fd0 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -480,60 +480,32 @@
}
EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
- struct net *net,
- struct sk_buff *skb,
- u8 ipproto,
- __be16 tp_src,
- __be16 tp_dst)
+static unsigned int packet_length(const struct sk_buff *skb)
{
- struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
- const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
- const struct ip_tunnel_key *tun_key;
- u32 skb_mark = skb->mark;
- struct rtable *rt;
- struct flowi4 fl;
+ unsigned int length = skb->len - ETH_HLEN;
- if (unlikely(!tun_info))
- return -EINVAL;
- if (ip_tunnel_info_af(tun_info) != AF_INET)
- return -EINVAL;
+ if (skb->protocol == htons(ETH_P_8021Q))
+ length -= VLAN_HLEN;
- tun_key = &tun_info->key;
-
- /* Route lookup to get srouce IP address.
- * The process may need to be changed if the corresponding process
- * in vports ops changed.
- */
- rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
-
- ip_rt_put(rt);
-
- /* Generate egress_tun_info based on tun_info,
- * saddr, tp_src and tp_dst
- */
- ip_tunnel_key_init(&egress_tun_info->key,
- fl.saddr, tun_key->u.ipv4.dst,
- tun_key->tos,
- tun_key->ttl,
- tp_src, tp_dst,
- tun_key->tun_id,
- tun_key->tun_flags);
- egress_tun_info->options_len = tun_info->options_len;
- egress_tun_info->mode = tun_info->mode;
- upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
- return 0;
+ return length;
}
-EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
- /* get_egress_tun_info() is only implemented on tunnel ports. */
- if (unlikely(!vport->ops->get_egress_tun_info))
- return -EINVAL;
+ int mtu = vport->dev->mtu;
- return vport->ops->get_egress_tun_info(vport, skb, upcall);
+ if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+ net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+ vport->dev->name,
+ packet_length(skb), mtu);
+ vport->dev->stats.tx_errors++;
+ goto drop;
+ }
+
+ skb->dev = vport->dev;
+ vport->ops->send(skb);
+ return;
+
+drop:
+ kfree_skb(skb);
}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index a413f3a..bdfd82a 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -27,7 +27,6 @@
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/u64_stats_sync.h>
-#include <net/route.h>
#include "datapath.h"
@@ -53,16 +52,6 @@
int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
- struct net *net,
- struct sk_buff *,
- u8 ipproto,
- __be16 tp_src,
- __be16 tp_dst);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall);
-
/**
* struct vport_portids - array of netlink portids of a vport.
* must be protected by rcu.
@@ -140,8 +129,6 @@
* have any configuration.
* @send: Send a packet on the device. Returns the length of the packet sent,
* zero for dropped packets or negative for error.
- * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
- * a packet.
*/
struct vport_ops {
enum ovs_vport_type type;
@@ -153,10 +140,7 @@
int (*set_options)(struct vport *, struct nlattr *);
int (*get_options)(const struct vport *, struct sk_buff *);
- void (*send)(struct vport *, struct sk_buff *);
- int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
- struct dp_upcall_info *upcall);
-
+ netdev_tx_t (*send) (struct sk_buff *skb);
struct module *owner;
struct list_head list;
};
@@ -234,9 +218,6 @@
return rt;
}
-static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
- vport->ops->send(vport, skb);
-}
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb);
#endif /* vport.h */
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 1eb76956..8950d39 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -746,7 +746,7 @@
.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
};
u16 mode = BRIDGE_MODE_UNDEF;
- u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+ u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
int err;
err = switchdev_port_attr_get(dev, &attr);
@@ -817,6 +817,9 @@
err = switchdev_port_br_setflag(dev, attr,
BR_LEARNING_SYNC);
break;
+ case IFLA_BRPORT_UNICAST_FLOOD:
+ err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e7000be..ed98c1f 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -94,10 +94,14 @@
goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
- goto out;
+ goto out1;
register_sysctl_root(&net_sysctl_root);
out:
return ret;
+out1:
+ unregister_sysctl_table(net_header);
+ net_header = NULL;
+ goto out;
}
struct ctl_table_header *register_net_sysctl(struct net *net,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 41042de..9dc239d 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -35,741 +35,301 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
-#include "core.h"
+#include "link.h"
+#include "node.h"
-#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
-#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
+#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
+#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
const char tipc_bclink_name[] = "broadcast-link";
-static void tipc_nmap_diff(struct tipc_node_map *nm_a,
- struct tipc_node_map *nm_b,
- struct tipc_node_map *nm_diff);
-static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
-static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
-static void tipc_bclink_lock(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- spin_lock_bh(&tn->bclink->lock);
-}
-
-static void tipc_bclink_unlock(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- spin_unlock_bh(&tn->bclink->lock);
-}
-
-void tipc_bclink_input(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
-}
-
-uint tipc_bclink_get_mtu(void)
-{
- return MAX_PKT_DEFAULT_MCAST;
-}
-
-static u32 bcbuf_acks(struct sk_buff *buf)
-{
- return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
-}
-
-static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
-{
- TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
-}
-
-static void bcbuf_decr_acks(struct sk_buff *buf)
-{
- bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
-}
-
-void tipc_bclink_add_node(struct net *net, u32 addr)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- tipc_bclink_lock(net);
- tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
- tipc_bclink_unlock(net);
-}
-
-void tipc_bclink_remove_node(struct net *net, u32 addr)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- tipc_bclink_lock(net);
- tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
-
- /* Last node? => reset backlog queue */
- if (!tn->bclink->bcast_nodes.count)
- tipc_link_purge_backlog(&tn->bclink->link);
-
- tipc_bclink_unlock(net);
-}
-
-static void bclink_set_last_sent(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *bcl = tn->bcl;
-
- bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
-}
-
-u32 tipc_bclink_get_last_sent(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- return tn->bcl->silent_intv_cnt;
-}
-
-static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
-{
- node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
- seqno : node->bclink.last_sent;
-}
-
/**
- * tipc_bclink_retransmit_to - get most recent node to request retransmission
- *
- * Called with bclink_lock locked
+ * struct tipc_bc_base - base structure for keeping broadcast send state
+ * @link: broadcast send link structure
+ * @inputq: data input queue; will only carry SOCK_WAKEUP messages
+ * @dest: array keeping number of reachable destinations per bearer
+ * @primary_bearer: a bearer having links to all broadcast destinations, if any
*/
-struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+struct tipc_bc_base {
+ struct tipc_link *link;
+ struct sk_buff_head inputq;
+ int dests[MAX_BEARERS];
+ int primary_bearer;
+};
- return tn->bclink->retransmit_to;
+static struct tipc_bc_base *tipc_bc_base(struct net *net)
+{
+ return tipc_net(net)->bcbase;
}
-/**
- * bclink_retransmit_pkt - retransmit broadcast packets
- * @after: sequence number of last packet to *not* retransmit
- * @to: sequence number of last packet to retransmit
- *
- * Called with bclink_lock locked
- */
-static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
+int tipc_bcast_get_mtu(struct net *net)
{
- struct sk_buff *skb;
- struct tipc_link *bcl = tn->bcl;
-
- skb_queue_walk(&bcl->transmq, skb) {
- if (more(buf_seqno(skb), after)) {
- tipc_link_retransmit(bcl, skb, mod(to - after));
- break;
- }
- }
+ return tipc_link_mtu(tipc_bc_sndlink(net));
}
-/**
- * bclink_prepare_wakeup - prepare users for wakeup after congestion
- * @bcl: broadcast link
- * @resultq: queue for users which can be woken up
- * Move a number of waiting users, as permitted by available space in
- * the send queue, from link wait queue to specified queue for wakeup
+/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
+ * if any, and make it primary bearer
*/
-static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+static void tipc_bcbase_select_primary(struct net *net)
{
- int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
- int imp, lim;
- struct sk_buff *skb, *tmp;
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ int all_dests = tipc_link_bc_peers(bb->link);
+ int i, mtu;
- skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
- imp = TIPC_SKB_CB(skb)->chain_imp;
- lim = bcl->window + bcl->backlog[imp].limit;
- pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
- if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+ bb->primary_bearer = INVALID_BEARER_ID;
+
+ if (!all_dests)
+ return;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (!bb->dests[i])
continue;
- skb_unlink(skb, &bcl->wakeupq);
- skb_queue_tail(resultq, skb);
+
+ mtu = tipc_bearer_mtu(net, i);
+ if (mtu < tipc_link_mtu(bb->link))
+ tipc_link_set_mtu(bb->link, mtu);
+
+ if (bb->dests[i] < all_dests)
+ continue;
+
+ bb->primary_bearer = i;
+
+ /* Reduce the risk that all nodes select the same primary */
+ if ((i ^ tipc_own_addr(net)) & 1)
+ break;
}
}
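To make the tie-break concrete (a hypothetical scenario, not taken from the
patch): suppose bearers 0 and 1 both reach all destinations. A node with an
odd own address sees (0 ^ addr) & 1 != 0 and stops at bearer 0, while an
even-addressed node keeps scanning and settles on bearer 1, so the broadcast
load tends to split across the two bearers instead of piling onto the first.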
-/**
- * tipc_bclink_wakeup_users - wake up pending users
- *
- * Called with no locks taken
- */
-void tipc_bclink_wakeup_users(struct net *net)
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *bcl = tn->bcl;
- struct sk_buff_head resultq;
+ struct tipc_bc_base *bb = tipc_bc_base(net);
- skb_queue_head_init(&resultq);
- bclink_prepare_wakeup(bcl, &resultq);
- tipc_sk_rcv(net, &resultq);
+ tipc_bcast_lock(net);
+ bb->dests[bearer_id]++;
+ tipc_bcbase_select_primary(net);
+ tipc_bcast_unlock(net);
}
-/**
- * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
- * @n_ptr: node that sent acknowledgement info
- * @acked: broadcast sequence # that has been acknowledged
- *
- * Node is locked, bclink_lock unlocked.
- */
-void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
{
- struct sk_buff *skb, *tmp;
- unsigned int released = 0;
- struct net *net = n_ptr->net;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bc_base *bb = tipc_bc_base(net);
- if (unlikely(!n_ptr->bclink.recv_permitted))
+ tipc_bcast_lock(net);
+ bb->dests[bearer_id]--;
+ tipc_bcbase_select_primary(net);
+ tipc_bcast_unlock(net);
+}
+
+/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
+ *
+ * Note that number of reachable destinations, as indicated in the dests[]
+ * array, may transiently differ from the number of destinations indicated
+ * in each sent buffer. We can sustain this. Excess destination nodes will
+ * drop and never acknowledge the unexpected packets, and missing destinations
+ * will either require retransmission (if they are just about to be added to
+ * the bearer), or be removed from the buffer's 'ackers' counter (if they
+ * just went down)
+ */
+static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
+{
+ int bearer_id;
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ struct sk_buff *skb, *_skb;
+ struct sk_buff_head _xmitq;
+
+ if (skb_queue_empty(xmitq))
return;
- tipc_bclink_lock(net);
-
- /* Bail out if tx queue is empty (no clean up is required) */
- skb = skb_peek(&tn->bcl->transmq);
- if (!skb)
- goto exit;
-
- /* Determine which messages need to be acknowledged */
- if (acked == INVALID_LINK_SEQ) {
- /*
- * Contact with specified node has been lost, so need to
- * acknowledge sent messages only (if other nodes still exist)
- * or both sent and unsent messages (otherwise)
- */
- if (tn->bclink->bcast_nodes.count)
- acked = tn->bcl->silent_intv_cnt;
- else
- acked = tn->bcl->snd_nxt;
- } else {
- /*
- * Bail out if specified sequence number does not correspond
- * to a message that has been sent and not yet acknowledged
- */
- if (less(acked, buf_seqno(skb)) ||
- less(tn->bcl->silent_intv_cnt, acked) ||
- less_eq(acked, n_ptr->bclink.acked))
- goto exit;
+ /* The typical case: at least one bearer has links to all nodes */
+ bearer_id = bb->primary_bearer;
+ if (bearer_id >= 0) {
+ tipc_bearer_bc_xmit(net, bearer_id, xmitq);
+ return;
}
- /* Skip over packets that node has previously acknowledged */
- skb_queue_walk(&tn->bcl->transmq, skb) {
- if (more(buf_seqno(skb), n_ptr->bclink.acked))
- break;
- }
+ /* We have to transmit across all bearers */
+ skb_queue_head_init(&_xmitq);
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ if (!bb->dests[bearer_id])
+ continue;
- /* Update packets that node is now acknowledging */
- skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
- if (more(buf_seqno(skb), acked))
- break;
- bcbuf_decr_acks(skb);
- bclink_set_last_sent(net);
- if (bcbuf_acks(skb) == 0) {
- __skb_unlink(skb, &tn->bcl->transmq);
- kfree_skb(skb);
- released = 1;
+ skb_queue_walk(xmitq, skb) {
+ _skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ break;
+ __skb_queue_tail(&_xmitq, _skb);
}
+ tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
}
- n_ptr->bclink.acked = acked;
-
- /* Try resolving broadcast link congestion, if necessary */
- if (unlikely(skb_peek(&tn->bcl->backlogq))) {
- tipc_link_push_packets(tn->bcl);
- bclink_set_last_sent(net);
- }
- if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
- n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
-exit:
- tipc_bclink_unlock(net);
+ __skb_queue_purge(xmitq);
+ __skb_queue_purge(&_xmitq);
}
-/**
- * tipc_bclink_update_link_state - update broadcast link state
- *
- * RCU and node lock set
- */
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
- u32 last_sent)
-{
- struct sk_buff *buf;
- struct net *net = n_ptr->net;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- /* Ignore "stale" link state info */
- if (less_eq(last_sent, n_ptr->bclink.last_in))
- return;
-
- /* Update link synchronization state; quit if in sync */
- bclink_update_last_sent(n_ptr, last_sent);
-
- if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
- return;
-
- /* Update out-of-sync state; quit if loss is still unconfirmed */
- if ((++n_ptr->bclink.oos_state) == 1) {
- if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
- return;
- n_ptr->bclink.oos_state++;
- }
-
- /* Don't NACK if one has been recently sent (or seen) */
- if (n_ptr->bclink.oos_state & 0x1)
- return;
-
- /* Send NACK */
- buf = tipc_buf_acquire(INT_H_SIZE);
- if (buf) {
- struct tipc_msg *msg = buf_msg(buf);
- struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
- u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
-
- tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
- INT_H_SIZE, n_ptr->addr);
- msg_set_non_seq(msg, 1);
- msg_set_mc_netid(msg, tn->net_id);
- msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
- msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
- msg_set_bcgap_to(msg, to);
-
- tipc_bclink_lock(net);
- tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
- tn->bcl->stats.sent_nacks++;
- tipc_bclink_unlock(net);
- kfree_skb(buf);
-
- n_ptr->bclink.oos_state++;
- }
-}
-
-void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
-{
- u16 last = msg_last_bcast(hdr);
- int mtyp = msg_type(hdr);
-
- if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
- return;
- if (mtyp == STATE_MSG) {
- tipc_bclink_update_link_state(n, last);
- return;
- }
- /* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
- * and transfer synch info in LINK_PROTOCOL messages.
- */
- if (tipc_node_is_up(n))
- return;
- if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
- return;
- n->bclink.last_sent = last;
- n->bclink.last_in = last;
- n->bclink.oos_state = 0;
-}
-
-/**
- * bclink_peek_nack - monitor retransmission requests sent by other nodes
- *
- * Delay any upcoming NACK by this node if another node has already
- * requested the first message this node is going to ask for.
- */
-static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
-{
- struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
-
- if (unlikely(!n_ptr))
- return;
-
- tipc_node_lock(n_ptr);
- if (n_ptr->bclink.recv_permitted &&
- (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
- (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
- n_ptr->bclink.oos_state = 2;
- tipc_node_unlock(n_ptr);
- tipc_node_put(n_ptr);
-}
-
-/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
+/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
* and to identified node local sockets
* @net: the applicable net namespace
* @list: chain of buffers containing message
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *bcl = tn->bcl;
- struct tipc_bclink *bclink = tn->bclink;
+ struct tipc_link *l = tipc_bc_sndlink(net);
+ struct sk_buff_head xmitq, inputq, rcvq;
int rc = 0;
- int bc = 0;
- struct sk_buff *skb;
- struct sk_buff_head arrvq;
- struct sk_buff_head inputq;
- /* Prepare clone of message for local node */
- skb = tipc_msg_reassemble(list);
- if (unlikely(!skb))
+ __skb_queue_head_init(&rcvq);
+ __skb_queue_head_init(&xmitq);
+ skb_queue_head_init(&inputq);
+
+ /* Prepare message clone for local node */
+ if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
return -EHOSTUNREACH;
- /* Broadcast to all nodes */
- if (likely(bclink)) {
- tipc_bclink_lock(net);
- if (likely(bclink->bcast_nodes.count)) {
- rc = __tipc_link_xmit(net, bcl, list);
- if (likely(!rc)) {
- u32 len = skb_queue_len(&bcl->transmq);
+ tipc_bcast_lock(net);
+ if (tipc_link_bc_peers(l))
+ rc = tipc_link_xmit(l, list, &xmitq);
+ tipc_bcast_unlock(net);
- bclink_set_last_sent(net);
- bcl->stats.queue_sz_counts++;
- bcl->stats.accu_queue_sz += len;
- }
- bc = 1;
- }
- tipc_bclink_unlock(net);
- }
-
- if (unlikely(!bc))
- __skb_queue_purge(list);
-
+ /* Don't send to local node if adding to link failed */
if (unlikely(rc)) {
- kfree_skb(skb);
+ __skb_queue_purge(&rcvq);
return rc;
}
- /* Deliver message clone */
- __skb_queue_head_init(&arrvq);
- skb_queue_head_init(&inputq);
- __skb_queue_tail(&arrvq, skb);
- tipc_sk_mcast_rcv(net, &arrvq, &inputq);
- return rc;
-}
-/**
- * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
- *
- * Called with both sending node's lock and bclink_lock taken.
- */
-static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
-{
- struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-
- bclink_update_last_sent(node, seqno);
- node->bclink.last_in = seqno;
- node->bclink.oos_state = 0;
- tn->bcl->stats.recv_info++;
-
- /*
- * Unicast an ACK periodically, ensuring that
- * all nodes in the cluster don't ACK at the same time
- */
- if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
- tipc_link_proto_xmit(node_active_link(node, node->addr),
- STATE_MSG, 0, 0, 0, 0);
- tn->bcl->stats.sent_acks++;
- }
-}
-
-/**
- * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
- *
- * RCU is locked, no other locks set
- */
-void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *bcl = tn->bcl;
- struct tipc_msg *msg = buf_msg(buf);
- struct tipc_node *node;
- u32 next_in;
- u32 seqno;
- int deferred = 0;
- int pos = 0;
- struct sk_buff *iskb;
- struct sk_buff_head *arrvq, *inputq;
-
- /* Screen out unwanted broadcast messages */
- if (msg_mc_netid(msg) != tn->net_id)
- goto exit;
-
- node = tipc_node_find(net, msg_prevnode(msg));
- if (unlikely(!node))
- goto exit;
-
- tipc_node_lock(node);
- if (unlikely(!node->bclink.recv_permitted))
- goto unlock;
-
- /* Handle broadcast protocol message */
- if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
- if (msg_type(msg) != STATE_MSG)
- goto unlock;
- if (msg_destnode(msg) == tn->own_addr) {
- tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
- tipc_bclink_lock(net);
- bcl->stats.recv_nacks++;
- tn->bclink->retransmit_to = node;
- bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
- msg_bcgap_to(msg));
- tipc_bclink_unlock(net);
- tipc_node_unlock(node);
- } else {
- tipc_node_unlock(node);
- bclink_peek_nack(net, msg);
- }
- tipc_node_put(node);
- goto exit;
- }
-
- /* Handle in-sequence broadcast message */
- seqno = msg_seqno(msg);
- next_in = mod(node->bclink.last_in + 1);
- arrvq = &tn->bclink->arrvq;
- inputq = &tn->bclink->inputq;
-
- if (likely(seqno == next_in)) {
-receive:
- /* Deliver message to destination */
- if (likely(msg_isdata(msg))) {
- tipc_bclink_lock(net);
- bclink_accept_pkt(node, seqno);
- spin_lock_bh(&inputq->lock);
- __skb_queue_tail(arrvq, buf);
- spin_unlock_bh(&inputq->lock);
- node->action_flags |= TIPC_BCAST_MSG_EVT;
- tipc_bclink_unlock(net);
- tipc_node_unlock(node);
- } else if (msg_user(msg) == MSG_BUNDLER) {
- tipc_bclink_lock(net);
- bclink_accept_pkt(node, seqno);
- bcl->stats.recv_bundles++;
- bcl->stats.recv_bundled += msg_msgcnt(msg);
- pos = 0;
- while (tipc_msg_extract(buf, &iskb, &pos)) {
- spin_lock_bh(&inputq->lock);
- __skb_queue_tail(arrvq, iskb);
- spin_unlock_bh(&inputq->lock);
- }
- node->action_flags |= TIPC_BCAST_MSG_EVT;
- tipc_bclink_unlock(net);
- tipc_node_unlock(node);
- } else if (msg_user(msg) == MSG_FRAGMENTER) {
- tipc_bclink_lock(net);
- bclink_accept_pkt(node, seqno);
- tipc_buf_append(&node->bclink.reasm_buf, &buf);
- if (unlikely(!buf && !node->bclink.reasm_buf)) {
- tipc_bclink_unlock(net);
- goto unlock;
- }
- bcl->stats.recv_fragments++;
- if (buf) {
- bcl->stats.recv_fragmented++;
- msg = buf_msg(buf);
- tipc_bclink_unlock(net);
- goto receive;
- }
- tipc_bclink_unlock(net);
- tipc_node_unlock(node);
- } else {
- tipc_bclink_lock(net);
- bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock(net);
- tipc_node_unlock(node);
- kfree_skb(buf);
- }
- buf = NULL;
-
- /* Determine new synchronization state */
- tipc_node_lock(node);
- if (unlikely(!tipc_node_is_up(node)))
- goto unlock;
-
- if (node->bclink.last_in == node->bclink.last_sent)
- goto unlock;
-
- if (skb_queue_empty(&node->bclink.deferdq)) {
- node->bclink.oos_state = 1;
- goto unlock;
- }
-
- msg = buf_msg(skb_peek(&node->bclink.deferdq));
- seqno = msg_seqno(msg);
- next_in = mod(next_in + 1);
- if (seqno != next_in)
- goto unlock;
-
- /* Take in-sequence message from deferred queue & deliver it */
- buf = __skb_dequeue(&node->bclink.deferdq);
- goto receive;
- }
-
- /* Handle out-of-sequence broadcast message */
- if (less(next_in, seqno)) {
- deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
- buf);
- bclink_update_last_sent(node, seqno);
- buf = NULL;
- }
-
- tipc_bclink_lock(net);
-
- if (deferred)
- bcl->stats.deferred_recv++;
- else
- bcl->stats.duplicates++;
-
- tipc_bclink_unlock(net);
-
-unlock:
- tipc_node_unlock(node);
- tipc_node_put(node);
-exit:
- kfree_skb(buf);
-}
-
-u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
-{
- return (n_ptr->bclink.recv_permitted &&
- (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
-}
-
-
-/**
- * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
- *
- * Send packet over as many bearers as necessary to reach all nodes
- * that have joined the broadcast link.
- *
- * Returns 0 (packet sent successfully) under all circumstances,
- * since the broadcast link's pseudo-bearer never blocks
- */
-static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
- struct tipc_bearer *unused1,
- struct tipc_media_addr *unused2)
-{
- int bp_index;
- struct tipc_msg *msg = buf_msg(buf);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_bcbearer *bcbearer = tn->bcbearer;
- struct tipc_bclink *bclink = tn->bclink;
-
- /* Prepare broadcast link message for reliable transmission,
- * if first time trying to send it;
- * preparation is skipped for broadcast link protocol messages
- * since they are sent in an unreliable manner and don't need it
- */
- if (likely(!msg_non_seq(buf_msg(buf)))) {
- bcbuf_set_acks(buf, bclink->bcast_nodes.count);
- msg_set_non_seq(msg, 1);
- msg_set_mc_netid(msg, tn->net_id);
- tn->bcl->stats.sent_info++;
- if (WARN_ON(!bclink->bcast_nodes.count)) {
- dump_stack();
- return 0;
- }
- }
-
- /* Send buffer over bearers until all targets reached */
- bcbearer->remains = bclink->bcast_nodes;
-
- for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
- struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
- struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
- struct tipc_bearer *bp[2] = {p, s};
- struct tipc_bearer *b = bp[msg_link_selector(msg)];
- struct sk_buff *tbuf;
-
- if (!p)
- break; /* No more bearers to try */
- if (!b)
- b = p;
- tipc_nmap_diff(&bcbearer->remains, &b->nodes,
- &bcbearer->remains_new);
- if (bcbearer->remains_new.count == bcbearer->remains.count)
- continue; /* Nothing added by bearer pair */
-
- if (bp_index == 0) {
- /* Use original buffer for first bearer */
- tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
- } else {
- /* Avoid concurrent buffer access */
- tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
- if (!tbuf)
- break;
- tipc_bearer_send(net, b->identity, tbuf,
- &b->bcast_addr);
- kfree_skb(tbuf); /* Bearer keeps a clone */
- }
- if (bcbearer->remains_new.count == 0)
- break; /* All targets reached */
-
- bcbearer->remains = bcbearer->remains_new;
- }
-
+ /* Broadcast to all nodes, including local node */
+ tipc_bcbase_xmit(net, &xmitq);
+ tipc_sk_mcast_rcv(net, &rcvq, &inputq);
+ __skb_queue_purge(list);
return 0;
}
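
/* Sketch (toy strings instead of sk_buffs, not part of the patch): the
 * ordering enforced by tipc_bcast_xmit() above - a local copy of the
 * message is reassembled first (failure aborts with -EHOSTUNREACH before
 * anything is sent), and only then are the originals handed to the wire. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *frags[] = { "he", "llo ", "world" };   /* xmit list */
    char local[32] = "";

    for (int i = 0; i < 3; i++)        /* reassemble local copy first */
        strcat(local, frags[i]);

    for (int i = 0; i < 3; i++)        /* then transmit the originals */
        printf("wire:  %s\n", frags[i]);
    printf("local: %s\n", local);
    return 0;
}
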
-/**
- * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
+ *
+ * RCU is locked, no other locks set
*/
-void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
- u32 node, bool action)
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_bcbearer *bcbearer = tn->bcbearer;
- struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
- struct tipc_bcbearer_pair *bp_curr;
- struct tipc_bearer *b;
- int b_index;
- int pri;
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct sk_buff_head xmitq;
+ int rc;
- tipc_bclink_lock(net);
+ __skb_queue_head_init(&xmitq);
- if (action)
- tipc_nmap_add(nm_ptr, node);
+ if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ tipc_bcast_lock(net);
+ if (msg_user(hdr) == BCAST_PROTOCOL)
+ rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
else
- tipc_nmap_remove(nm_ptr, node);
+ rc = tipc_link_rcv(l, skb, NULL);
+ tipc_bcast_unlock(net);
- /* Group bearers by priority (can assume max of two per priority) */
- memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+ tipc_bcbase_xmit(net, &xmitq);
- rcu_read_lock();
- for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
- b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
- if (!b || !b->nodes.count)
- continue;
+ /* Any socket wakeup messages? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
- if (!bp_temp[b->priority].primary)
- bp_temp[b->priority].primary = b;
- else
- bp_temp[b->priority].secondary = b;
+ return rc;
+}
+
+/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+{
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_bcast_lock(net);
+ tipc_link_bc_ack_rcv(l, acked, &xmitq);
+ tipc_bcast_unlock(net);
+
+ tipc_bcbase_xmit(net, &xmitq);
+
+ /* Any socket wakeup messages? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
+}
+
+/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr)
+{
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_bcast_lock(net);
+ if (msg_type(hdr) == STATE_MSG) {
+ tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
+ tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+ } else {
+ tipc_link_bc_init_rcv(l, hdr);
}
- rcu_read_unlock();
+ tipc_bcast_unlock(net);
- /* Create array of bearer pairs for broadcasting */
- bp_curr = bcbearer->bpairs;
- memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+ tipc_bcbase_xmit(net, &xmitq);
- for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
+ /* Any socket wakeup messages? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
+}
- if (!bp_temp[pri].primary)
- continue;
+/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_link *snd_l = tipc_bc_sndlink(net);
- bp_curr->primary = bp_temp[pri].primary;
+ tipc_bcast_lock(net);
+ tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
+ tipc_bcbase_select_primary(net);
+ tipc_bcast_unlock(net);
+}
- if (bp_temp[pri].secondary) {
- if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
- &bp_temp[pri].secondary->nodes)) {
- bp_curr->secondary = bp_temp[pri].secondary;
- } else {
- bp_curr++;
- bp_curr->primary = bp_temp[pri].secondary;
- }
- }
+/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
+{
+ struct tipc_link *snd_l = tipc_bc_sndlink(net);
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct sk_buff_head xmitq;
- bp_curr++;
- }
+ __skb_queue_head_init(&xmitq);
- tipc_bclink_unlock(net);
+ tipc_bcast_lock(net);
+ tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
+ tipc_bcbase_select_primary(net);
+ tipc_bcast_unlock(net);
+
+ tipc_bcbase_xmit(net, &xmitq);
+
+ /* Any socket wakeup messages? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -835,7 +395,7 @@
if (!bcl)
return 0;
- tipc_bclink_lock(net);
+ tipc_bcast_lock(net);
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -870,7 +430,7 @@
if (err)
goto attr_msg_full;
- tipc_bclink_unlock(net);
+ tipc_bcast_unlock(net);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
@@ -881,7 +441,7 @@
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
- tipc_bclink_unlock(net);
+ tipc_bcast_unlock(net);
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
@@ -895,25 +455,25 @@
if (!bcl)
return -ENOPROTOOPT;
- tipc_bclink_lock(net);
+ tipc_bcast_lock(net);
memset(&bcl->stats, 0, sizeof(bcl->stats));
- tipc_bclink_unlock(net);
+ tipc_bcast_unlock(net);
return 0;
}
-int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
+static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *bcl = tn->bcl;
+ struct tipc_link *l = tipc_bc_sndlink(net);
- if (!bcl)
+ if (!l)
return -ENOPROTOOPT;
- if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+ if (limit < BCLINK_WIN_MIN)
+ limit = BCLINK_WIN_MIN;
+ if (limit > TIPC_MAX_LINK_WIN)
return -EINVAL;
-
- tipc_bclink_lock(net);
- tipc_link_set_queue_limits(bcl, limit);
- tipc_bclink_unlock(net);
+ tipc_bcast_lock(net);
+ tipc_link_set_queue_limits(l, limit);
+ tipc_bcast_unlock(net);
return 0;
}
@@ -935,123 +495,51 @@
win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
- return tipc_bclink_set_queue_limits(net, win);
+ return tipc_bc_link_set_queue_limits(net, win);
}
-int tipc_bclink_init(struct net *net)
+int tipc_bcast_init(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_bcbearer *bcbearer;
- struct tipc_bclink *bclink;
- struct tipc_link *bcl;
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bc_base *bb = NULL;
+ struct tipc_link *l = NULL;
- bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
- if (!bcbearer)
- return -ENOMEM;
+ bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+ if (!bb)
+ goto enomem;
+ tn->bcbase = bb;
+ spin_lock_init(&tipc_net(net)->bclock);
- bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
- if (!bclink) {
- kfree(bcbearer);
- return -ENOMEM;
- }
-
- bcl = &bclink->link;
- bcbearer->bearer.media = &bcbearer->media;
- bcbearer->media.send_msg = tipc_bcbearer_send;
- sprintf(bcbearer->media.name, "tipc-broadcast");
-
- spin_lock_init(&bclink->lock);
- __skb_queue_head_init(&bcl->transmq);
- __skb_queue_head_init(&bcl->backlogq);
- __skb_queue_head_init(&bcl->deferdq);
- skb_queue_head_init(&bcl->wakeupq);
- bcl->snd_nxt = 1;
- spin_lock_init(&bclink->node.lock);
- __skb_queue_head_init(&bclink->arrvq);
- skb_queue_head_init(&bclink->inputq);
- bcl->owner = &bclink->node;
- bcl->owner->net = net;
- bcl->mtu = MAX_PKT_DEFAULT_MCAST;
- tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
- bcl->bearer_id = MAX_BEARERS;
- rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
- bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
- msg_set_prevnode(bcl->pmsg, tn->own_addr);
- strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
- tn->bcbearer = bcbearer;
- tn->bclink = bclink;
- tn->bcl = bcl;
+ if (!tipc_link_bc_create(net, 0, 0,
+ U16_MAX,
+ BCLINK_WIN_DEFAULT,
+ 0,
+ &bb->inputq,
+ NULL,
+ NULL,
+ &l))
+ goto enomem;
+ bb->link = l;
+ tn->bcl = l;
return 0;
+enomem:
+ kfree(bb);
+ kfree(l);
+ return -ENOMEM;
}
-void tipc_bclink_stop(struct net *net)
+void tipc_bcast_reinit(struct net *net)
+{
+ struct tipc_bc_base *b = tipc_bc_base(net);
+
+ msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
+}
+
+void tipc_bcast_stop(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- tipc_bclink_lock(net);
- tipc_link_purge_queues(tn->bcl);
- tipc_bclink_unlock(net);
-
- RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
synchronize_net();
- kfree(tn->bcbearer);
- kfree(tn->bclink);
-}
-
-/**
- * tipc_nmap_add - add a node to a node map
- */
-static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
-{
- int n = tipc_node(node);
- int w = n / WSIZE;
- u32 mask = (1 << (n % WSIZE));
-
- if ((nm_ptr->map[w] & mask) == 0) {
- nm_ptr->count++;
- nm_ptr->map[w] |= mask;
- }
-}
-
-/**
- * tipc_nmap_remove - remove a node from a node map
- */
-static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
-{
- int n = tipc_node(node);
- int w = n / WSIZE;
- u32 mask = (1 << (n % WSIZE));
-
- if ((nm_ptr->map[w] & mask) != 0) {
- nm_ptr->map[w] &= ~mask;
- nm_ptr->count--;
- }
-}
-
-/**
- * tipc_nmap_diff - find differences between node maps
- * @nm_a: input node map A
- * @nm_b: input node map B
- * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
- */
-static void tipc_nmap_diff(struct tipc_node_map *nm_a,
- struct tipc_node_map *nm_b,
- struct tipc_node_map *nm_diff)
-{
- int stop = ARRAY_SIZE(nm_a->map);
- int w;
- int b;
- u32 map;
-
- memset(nm_diff, 0, sizeof(*nm_diff));
- for (w = 0; w < stop; w++) {
- map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
- nm_diff->map[w] = map;
- if (map != 0) {
- for (b = 0 ; b < WSIZE; b++) {
- if (map & (1 << b))
- nm_diff->count++;
- }
- }
- }
+ kfree(tn->bcbase);
+ kfree(tn->bcl);
}
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index d74c69b..2855b93 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -37,102 +37,44 @@
#ifndef _TIPC_BCAST_H
#define _TIPC_BCAST_H
-#include <linux/tipc_config.h>
-#include "link.h"
-#include "node.h"
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
- struct tipc_bearer *primary;
- struct tipc_bearer *secondary;
-};
-
-#define BCBEARER MAX_BEARERS
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines. Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
- struct tipc_bearer bearer;
- struct tipc_media media;
- struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
- struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
- struct tipc_node_map remains;
- struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
- spinlock_t lock;
- struct tipc_link link;
- struct tipc_node node;
- struct sk_buff_head arrvq;
- struct sk_buff_head inputq;
- struct tipc_node_map bcast_nodes;
- struct tipc_node *retransmit_to;
-};
+#include "core.h"
struct tipc_node;
-extern const char tipc_bclink_name[];
+struct tipc_msg;
+struct tipc_nl_msg;
+struct tipc_node_map;
-/**
- * tipc_nmap_equal - test for equality of node maps
- */
-static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
- struct tipc_node_map *nm_b)
-{
- return !memcmp(nm_a, nm_b, sizeof(*nm_a));
-}
-
-int tipc_bclink_init(struct net *net);
-void tipc_bclink_stop(struct net *net);
-void tipc_bclink_add_node(struct net *net, u32 addr);
-void tipc_bclink_remove_node(struct net *net, u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
-void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
-u32 tipc_bclink_get_last_sent(struct net *net);
-u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct tipc_node *node,
- u32 last_sent);
-int tipc_bclink_reset_stats(struct net *net);
-int tipc_bclink_set_queue_limits(struct net *net, u32 limit);
-void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
- u32 node, bool action);
-uint tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
-void tipc_bclink_wakeup_users(struct net *net);
+int tipc_bcast_init(struct net *net);
+void tipc_bcast_reinit(struct net *net);
+void tipc_bcast_stop(struct net *net);
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
+int tipc_bcast_get_mtu(struct net *net);
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
-void tipc_bclink_input(struct net *net);
-void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
+int tipc_bclink_reset_stats(struct net *net);
+
+static inline void tipc_bcast_lock(struct net *net)
+{
+ spin_lock_bh(&tipc_net(net)->bclock);
+}
+
+static inline void tipc_bcast_unlock(struct net *net)
+{
+ spin_unlock_bh(&tipc_net(net)->bclock);
+}
+
+static inline struct tipc_link *tipc_bc_sndlink(struct net *net)
+{
+ return tipc_net(net)->bcl;
+}
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 82b2786..648f2a6 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -193,10 +193,8 @@
rcu_read_lock();
b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (b_ptr) {
- tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
+ if (b_ptr)
tipc_disc_add_dest(b_ptr->link_req);
- }
rcu_read_unlock();
}
@@ -207,10 +205,8 @@
rcu_read_lock();
b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (b_ptr) {
- tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
+ if (b_ptr)
tipc_disc_remove_dest(b_ptr->link_req);
- }
rcu_read_unlock();
}
@@ -418,10 +414,9 @@
* @b_ptr: the bearer through which the packet is to be sent
* @dest: peer destination address
*/
-int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
struct tipc_bearer *b, struct tipc_media_addr *dest)
{
- struct sk_buff *clone;
struct net_device *dev;
int delta;
@@ -429,42 +424,48 @@
if (!dev)
return 0;
- clone = skb_clone(buf, GFP_ATOMIC);
- if (!clone)
- return 0;
-
- delta = dev->hard_header_len - skb_headroom(buf);
+ delta = dev->hard_header_len - skb_headroom(skb);
if ((delta > 0) &&
- pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(clone);
+ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
return 0;
}
- skb_reset_network_header(clone);
- clone->dev = dev;
- clone->protocol = htons(ETH_P_TIPC);
- dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
- dev->dev_addr, clone->len);
- dev_queue_xmit(clone);
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_TIPC);
+ dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
+ dev->dev_addr, skb->len);
+ dev_queue_xmit(skb);
return 0;
}
-/* tipc_bearer_send- sends buffer to destination over bearer
- *
- * IMPORTANT:
- * The media send routine must not alter the buffer being passed in
- * as it may be needed for later retransmission!
- */
-void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
- struct tipc_media_addr *dest)
+int tipc_bearer_mtu(struct net *net, u32 bearer_id)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_bearer *b_ptr;
+ int mtu = 0;
+ struct tipc_bearer *b;
rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (likely(b_ptr))
- b_ptr->media->send_msg(net, buf, b_ptr, dest);
+ b = rcu_dereference_rtnl(tipc_net(net)->bearer_list[bearer_id]);
+ if (b)
+ mtu = b->mtu;
+ rcu_read_unlock();
+ return mtu;
+}
+
+/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
+ */
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+ struct sk_buff *skb,
+ struct tipc_media_addr *dest)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ if (likely(b))
+ b->media->send_msg(net, skb, b, dest);
rcu_read_unlock();
}
@@ -487,8 +488,31 @@
skb_queue_walk_safe(xmitq, skb, tmp) {
__skb_dequeue(xmitq);
b->media->send_msg(net, skb, b, dst);
- /* Until we remove cloning in tipc_l2_send_msg(): */
- kfree_skb(skb);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/* tipc_bearer_bc_xmit() - broadcast buffers to all destinations
+ */
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_net *tn = tipc_net(net);
+ int net_id = tn->net_id;
+ struct tipc_bearer *b;
+ struct sk_buff *skb, *tmp;
+ struct tipc_msg *hdr;
+
+ rcu_read_lock();
+ b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ if (likely(b)) {
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ hdr = buf_msg(skb);
+ msg_set_non_seq(hdr, 1);
+ msg_set_mc_netid(hdr, net_id);
+ __skb_dequeue(xmitq);
+ b->media->send_msg(net, skb, b, &b->bcast_addr);
}
}
rcu_read_unlock();
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 6426f24..552185b 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -163,6 +163,7 @@
u32 identity;
struct tipc_link_req *link_req;
char net_plane;
+ int node_cnt;
struct tipc_node_map nodes;
};
@@ -215,10 +216,14 @@
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
-void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
- struct tipc_media_addr *dest);
+int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+ struct sk_buff *skb,
+ struct tipc_media_addr *dest);
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
struct tipc_media_addr *dst);
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq);
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 005ba5e..03a8428 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -42,6 +42,7 @@
#include "bearer.h"
#include "net.h"
#include "socket.h"
+#include "bcast.h"
#include <linux/module.h>
@@ -71,8 +72,15 @@
err = tipc_topsrv_start(net);
if (err)
goto out_subscr;
+
+ err = tipc_bcast_init(net);
+ if (err)
+ goto out_bclink;
+
return 0;
+out_bclink:
+ tipc_bcast_stop(net);
out_subscr:
tipc_nametbl_stop(net);
out_nametbl:
@@ -85,6 +93,7 @@
{
tipc_topsrv_stop(net);
tipc_net_stop(net);
+ tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
}
diff --git a/net/tipc/core.h b/net/tipc/core.h
index b96b41e..18e95a8 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -62,8 +62,7 @@
struct tipc_node;
struct tipc_bearer;
-struct tipc_bcbearer;
-struct tipc_bclink;
+struct tipc_bc_base;
struct tipc_link;
struct tipc_name_table;
struct tipc_server;
@@ -93,8 +92,8 @@
struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
/* Broadcast link */
- struct tipc_bcbearer *bcbearer;
- struct tipc_bclink *bclink;
+ spinlock_t bclock;
+ struct tipc_bc_base *bcbase;
struct tipc_link *bcl;
/* Socket hash table */
@@ -114,6 +113,11 @@
return net_generic(net, tipc_net_id);
}
+static inline int tipc_netid(struct net *net)
+{
+ return tipc_net(net)->net_id;
+}
+
static inline u16 mod(u16 x)
{
return x & 0xffffu;
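
The sequence-number handling throughout link.c and bcast.c relies on 16-bit
wraparound arithmetic: mod() truncates to u16, and the less()/more()
comparisons built on it treat a sequence number as behind another when it
trails by less than half the number space. A minimal userspace sketch of that
rule (a restatement under the assumption that it mirrors the in-kernel
helpers, not a copy of them):

#include <assert.h>
#include <stdint.h>

static uint16_t mod(uint16_t x)
{
    return x & 0xffffu;                  /* identity for u16, kept for clarity */
}

static int less_eq(uint16_t left, uint16_t right)
{
    return mod(right - left) < 32768u;   /* behind by at most half the space */
}

int main(void)
{
    assert(less_eq(65535, 2));           /* 65535 wraps to 0, 1, 2: still behind */
    assert(!less_eq(2, 65535));          /* the reverse direction is "ahead" */
    assert(less_eq(7, 7));
    return 0;
}
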
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index d14e0a4..afe8c47 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -89,7 +89,7 @@
MAX_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
msg_set_node_sig(msg, tn->random);
- msg_set_node_capabilities(msg, 0);
+ msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES);
msg_set_dest_domain(msg, dest_domain);
msg_set_bc_netid(msg, tn->net_id);
b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
@@ -167,11 +167,10 @@
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
rskb = tipc_buf_acquire(MAX_H_SIZE);
- if (rskb) {
- tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
- tipc_bearer_send(net, bearer->identity, rskb, &maddr);
- kfree_skb(rskb);
- }
+ if (!rskb)
+ return;
+ tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+ tipc_bearer_xmit_skb(net, bearer->identity, rskb, &maddr);
}
}
@@ -225,6 +224,7 @@
static void disc_timeout(unsigned long data)
{
struct tipc_link_req *req = (struct tipc_link_req *)data;
+ struct sk_buff *skb;
int max_delay;
spin_lock_bh(&req->lock);
@@ -242,9 +242,9 @@
* hold at fast polling rate if don't have any associated nodes,
* otherwise hold at slow polling rate
*/
- tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
-
-
+ skb = skb_clone(req->buf, GFP_ATOMIC);
+ if (skb)
+ tipc_bearer_xmit_skb(req->net, req->bearer_id, skb, &req->dest);
req->timer_intv *= 2;
if (req->num_nodes)
max_delay = TIPC_LINK_REQ_SLOW;
@@ -271,6 +271,7 @@
struct tipc_media_addr *dest)
{
struct tipc_link_req *req;
+ struct sk_buff *skb;
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
@@ -292,7 +293,9 @@
setup_timer(&req->timer, disc_timeout, (unsigned long)req);
mod_timer(&req->timer, jiffies + req->timer_intv);
b_ptr->link_req = req;
- tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
+ skb = skb_clone(req->buf, GFP_ATOMIC);
+ if (skb)
+ tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
return 0;
}
@@ -316,6 +319,7 @@
void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
{
struct tipc_link_req *req = b_ptr->link_req;
+ struct sk_buff *skb;
spin_lock_bh(&req->lock);
tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
@@ -325,6 +329,8 @@
req->num_nodes = 0;
req->timer_intv = TIPC_LINK_REQ_INIT;
mod_timer(&req->timer, jiffies + req->timer_intv);
- tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
+ skb = skb_clone(req->buf, GFP_ATOMIC);
+ if (skb)
+ tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
spin_unlock_bh(&req->lock);
}
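
The recurring skb_clone() calls added in this file reflect an ownership
change: the old path cloned inside tipc_l2_send_msg() and left the caller's
buffer intact, while tipc_bearer_xmit_skb() consumes the skb it is given, so
the long-lived discovery buffer must now be cloned per transmission. A toy
restatement of that contract, with plain heap buffers standing in for
sk_buffs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void xmit_skb(char *skb)            /* consumes its argument */
{
    printf("sent: %s\n", skb);
    free(skb);
}

int main(void)
{
    char *req_buf = strdup("DSC_REQ");     /* kept for periodic resend */

    for (int i = 0; i < 2; i++) {
        char *clone = strdup(req_buf);     /* clone before each send */
        if (clone)
            xmit_skb(clone);
    }
    free(req_buf);                         /* original survives all sends */
    return 0;
}
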
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ff9b0b9..9efbdbd 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -50,6 +50,7 @@
*/
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
+static const char tipc_bclink_name[] = "broadcast-link";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
@@ -75,6 +76,14 @@
[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
};
+/* Send states for broadcast NACKs
+ */
+enum {
+ BC_NACK_SND_CONDITIONAL,
+ BC_NACK_SND_UNCONDITIONAL,
+ BC_NACK_SND_SUPPRESS,
+};
+
/*
* Interval between NACKs when packets arrive out of order
*/
@@ -110,7 +119,11 @@
struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
+static void tipc_link_build_nack_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
/*
* Simple non-static link routines (i.e. referenced outside this file)
@@ -150,11 +163,66 @@
return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}
+static bool link_is_bc_sndlink(struct tipc_link *l)
+{
+ return !l->bc_sndlink;
+}
+
+static bool link_is_bc_rcvlink(struct tipc_link *l)
+{
+ return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
+}
+
int tipc_link_is_active(struct tipc_link *l)
{
- struct tipc_node *n = l->owner;
+ return l->active;
+}
- return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
+void tipc_link_set_active(struct tipc_link *l, bool active)
+{
+ l->active = active;
+}
+
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *uc_l,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_link *rcv_l = uc_l->bc_rcvlink;
+
+ snd_l->ackers++;
+ rcv_l->acked = snd_l->snd_nxt - 1;
+ tipc_link_build_bc_init_msg(uc_l, xmitq);
+}
+
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *rcv_l,
+ struct sk_buff_head *xmitq)
+{
+ u16 ack = snd_l->snd_nxt - 1;
+
+ snd_l->ackers--;
+ tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
+ tipc_link_reset(rcv_l);
+ rcv_l->state = LINK_RESET;
+ if (!snd_l->ackers) {
+ tipc_link_reset(snd_l);
+ __skb_queue_purge(xmitq);
+ }
+}
+
+int tipc_link_bc_peers(struct tipc_link *l)
+{
+ return l->ackers;
+}
+
+void tipc_link_set_mtu(struct tipc_link *l, int mtu)
+{
+ l->mtu = mtu;
+}
+
+int tipc_link_mtu(struct tipc_link *l)
+{
+ return l->mtu;
}
static u32 link_own_addr(struct tipc_link *l)
@@ -165,57 +233,72 @@
/**
* tipc_link_create - create a new link
* @n: pointer to associated node
- * @b: pointer to associated bearer
+ * @if_name: associated interface name
+ * @bearer_id: id (index) of associated bearer
+ * @tolerance: link tolerance to be used by link
+ * @net_plane: network plane (A,B,C...) this link belongs to
+ * @mtu: mtu to be advertised by link
+ * @priority: priority to be used by link
+ * @window: send window to be used by link
+ * @session: session to be used by link
* @ownnode: identity of own node
- * @peer: identity of peer node
- * @maddr: media address to be used
+ * @peer: node id of peer node
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @bc_rcvlink: the peer specific link used for broadcast reception
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
*
* Returns true if link was created, otherwise false
*/
-bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
- u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
- struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+ int tolerance, char net_plane, u32 mtu, int priority,
+ int window, u32 session, u32 ownnode, u32 peer,
+ u16 peer_caps,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link *bc_rcvlink,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
struct tipc_link **link)
{
struct tipc_link *l;
struct tipc_msg *hdr;
- char *if_name;
l = kzalloc(sizeof(*l), GFP_ATOMIC);
if (!l)
return false;
*link = l;
-
- /* Note: peer i/f name is completed by reset/activate message */
- if_name = strchr(b->name, ':') + 1;
- sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
- tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
- if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-
- l->addr = peer;
- l->media_addr = maddr;
- l->owner = n;
- l->peer_session = WILDCARD_SESSION;
- l->bearer_id = b->identity;
- l->tolerance = b->tolerance;
- l->net_plane = b->net_plane;
- l->advertised_mtu = b->mtu;
- l->mtu = b->mtu;
- l->priority = b->priority;
- tipc_link_set_queue_limits(l, b->window);
- l->inputq = inputq;
- l->namedq = namedq;
- l->state = LINK_RESETTING;
l->pmsg = (struct tipc_msg *)&l->proto_msg;
hdr = l->pmsg;
tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
msg_set_size(hdr, sizeof(l->proto_msg));
msg_set_session(hdr, session);
msg_set_bearer_id(hdr, l->bearer_id);
+
+ /* Note: peer i/f name is completed by reset/activate message */
+ sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+ tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+ if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
strcpy((char *)msg_data(hdr), if_name);
+
+ l->addr = peer;
+ l->peer_caps = peer_caps;
+ l->net = net;
+ l->peer_session = WILDCARD_SESSION;
+ l->bearer_id = bearer_id;
+ l->tolerance = tolerance;
+ l->net_plane = net_plane;
+ l->advertised_mtu = mtu;
+ l->mtu = mtu;
+ l->priority = priority;
+ tipc_link_set_queue_limits(l, window);
+ l->ackers = 1;
+ l->bc_sndlink = bc_sndlink;
+ l->bc_rcvlink = bc_rcvlink;
+ l->inputq = inputq;
+ l->namedq = namedq;
+ l->state = LINK_RESETTING;
__skb_queue_head_init(&l->transmq);
__skb_queue_head_init(&l->backlogq);
__skb_queue_head_init(&l->deferdq);
@@ -224,27 +307,43 @@
return true;
}
-/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+/**
+ * tipc_link_bc_create - create new link to be used for broadcast
+ * @n: pointer to associated node
+ * @mtu: mtu to be used
+ * @window: send window to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
*
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
+ * Returns true if link was created, otherwise false
*/
-void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
- struct sk_buff_head *xmitq)
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
+ int mtu, int window, u16 peer_caps,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link **link)
{
- struct sk_buff *skb;
- struct sk_buff_head list;
- u16 last_sent;
+ struct tipc_link *l;
- skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
- 0, l->addr, link_own_addr(l), 0, 0, 0);
- if (!skb)
- return;
- last_sent = tipc_bclink_get_last_sent(l->owner->net);
- msg_set_last_bcast(buf_msg(skb), last_sent);
- __skb_queue_head_init(&list);
- __skb_queue_tail(&list, skb);
- tipc_link_xmit(l, &list, xmitq);
+ if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
+ 0, ownnode, peer, peer_caps, bc_sndlink,
+ NULL, inputq, namedq, link))
+ return false;
+
+ l = *link;
+ strcpy(l->name, tipc_bclink_name);
+ tipc_link_reset(l);
+ l->state = LINK_RESET;
+ l->ackers = 0;
+ l->bc_rcvlink = l;
+
+ /* Broadcast send link is always up */
+ if (link_is_bc_sndlink(l))
+ l->state = LINK_ESTABLISHED;
+
+ return true;
}
/**
@@ -451,12 +550,17 @@
/* tipc_link_timeout - perform periodic task as instructed from node timeout
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
int rc = 0;
int mtyp = STATE_MSG;
bool xmit = false;
bool prb = false;
+ u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
+ u16 bc_acked = l->bc_rcvlink->acked;
+ bool bc_up = link_is_up(l->bc_rcvlink);
link_profile_stats(l);
@@ -464,7 +568,7 @@
case LINK_ESTABLISHED:
case LINK_SYNCHING:
if (!l->silent_intv_cnt) {
- if (tipc_bclink_acks_missing(l->owner))
+ if (bc_up && (bc_acked != bc_snt))
xmit = true;
} else if (l->silent_intv_cnt <= l->abort_limit) {
xmit = true;
@@ -555,38 +659,6 @@
}
}
-/**
- * tipc_link_reset_fragments - purge link's inbound message fragments queue
- * @l_ptr: pointer to link
- */
-void tipc_link_reset_fragments(struct tipc_link *l_ptr)
-{
- kfree_skb(l_ptr->reasm_buf);
- l_ptr->reasm_buf = NULL;
-}
-
-void tipc_link_purge_backlog(struct tipc_link *l)
-{
- __skb_queue_purge(&l->backlogq);
- l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
- l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
- l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
- l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
- l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
-}
-
-/**
- * tipc_link_purge_queues - purge all pkt queues associated with link
- * @l_ptr: pointer to link
- */
-void tipc_link_purge_queues(struct tipc_link *l_ptr)
-{
- __skb_queue_purge(&l_ptr->deferdq);
- __skb_queue_purge(&l_ptr->transmq);
- tipc_link_purge_backlog(l_ptr);
- tipc_link_reset_fragments(l_ptr);
-}
-
void tipc_link_reset(struct tipc_link *l)
{
/* Link is down, accept any session */
@@ -598,12 +670,16 @@
/* Prepare for renewed mtu size negotiation */
l->mtu = l->advertised_mtu;
- /* Clean up all queues: */
+ /* Clean up all queues and counters: */
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
skb_queue_splice_init(&l->wakeupq, l->inputq);
-
- tipc_link_purge_backlog(l);
+ __skb_queue_purge(&l->backlogq);
+ l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+ l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
kfree_skb(l->reasm_buf);
kfree_skb(l->failover_reasm_skb);
l->reasm_buf = NULL;
@@ -611,81 +687,15 @@
l->rcv_unacked = 0;
l->snd_nxt = 1;
l->rcv_nxt = 1;
+ l->acked = 0;
l->silent_intv_cnt = 0;
l->stats.recv_info = 0;
l->stale_count = 0;
+ l->bc_peer_is_up = false;
link_reset_statistics(l);
}
/**
- * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
- * @link: link to use
- * @list: chain of buffers containing message
- *
- * Consumes the buffer chain, except when returning an error code,
- * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
- * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- */
-int __tipc_link_xmit(struct net *net, struct tipc_link *link,
- struct sk_buff_head *list)
-{
- struct tipc_msg *msg = buf_msg(skb_peek(list));
- unsigned int maxwin = link->window;
- unsigned int i, imp = msg_importance(msg);
- uint mtu = link->mtu;
- u16 ack = mod(link->rcv_nxt - 1);
- u16 seqno = link->snd_nxt;
- u16 bc_last_in = link->owner->bclink.last_in;
- struct tipc_media_addr *addr = link->media_addr;
- struct sk_buff_head *transmq = &link->transmq;
- struct sk_buff_head *backlogq = &link->backlogq;
- struct sk_buff *skb, *bskb;
-
- /* Match msg importance against this and all higher backlog limits: */
- for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
- if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
- return link_schedule_user(link, list);
- }
- if (unlikely(msg_size(msg) > mtu))
- return -EMSGSIZE;
-
- /* Prepare each packet for sending, and add to relevant queue: */
- while (skb_queue_len(list)) {
- skb = skb_peek(list);
- msg = buf_msg(skb);
- msg_set_seqno(msg, seqno);
- msg_set_ack(msg, ack);
- msg_set_bcast_ack(msg, bc_last_in);
-
- if (likely(skb_queue_len(transmq) < maxwin)) {
- __skb_dequeue(list);
- __skb_queue_tail(transmq, skb);
- tipc_bearer_send(net, link->bearer_id, skb, addr);
- link->rcv_unacked = 0;
- seqno++;
- continue;
- }
- if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
- kfree_skb(__skb_dequeue(list));
- link->stats.sent_bundled++;
- continue;
- }
- if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
- kfree_skb(__skb_dequeue(list));
- __skb_queue_tail(backlogq, bskb);
- link->backlog[msg_importance(buf_msg(bskb))].len++;
- link->stats.sent_bundled++;
- link->stats.sent_bundles++;
- continue;
- }
- link->backlog[imp].len += skb_queue_len(list);
- skb_queue_splice_tail_init(list, backlogq);
- }
- link->snd_nxt = seqno;
- return 0;
-}
-
-/**
* tipc_link_xmit(): enqueue buffer list according to queue situation
* @link: link to use
* @list: chain of buffers containing message
@@ -705,7 +715,7 @@
unsigned int mtu = l->mtu;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
- u16 bc_last_in = l->owner->bclink.last_in;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
struct sk_buff_head *transmq = &l->transmq;
struct sk_buff_head *backlogq = &l->backlogq;
struct sk_buff *skb, *_skb, *bskb;
@@ -724,7 +734,7 @@
hdr = buf_msg(skb);
msg_set_seqno(hdr, seqno);
msg_set_ack(hdr, ack);
- msg_set_bcast_ack(hdr, bc_last_in);
+ msg_set_bcast_ack(hdr, bc_ack);
if (likely(skb_queue_len(transmq) < maxwin)) {
_skb = skb_clone(skb, GFP_ATOMIC);
@@ -733,6 +743,7 @@
__skb_dequeue(list);
__skb_queue_tail(transmq, skb);
__skb_queue_tail(xmitq, _skb);
+ TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
seqno++;
continue;
@@ -757,62 +768,13 @@
return 0;
}
-/*
- * tipc_link_sync_rcv - synchronize broadcast link endpoints.
- * Receive the sequence number where we should start receiving and
- * acking broadcast packets from a newly added peer node, and open
- * up for reception of such packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
-{
- struct tipc_msg *msg = buf_msg(buf);
-
- n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
- n->bclink.recv_permitted = true;
- kfree_skb(buf);
-}
-
-/*
- * tipc_link_push_packets - push unsent packets to bearer
- *
- * Push out the unsent messages of a link where congestion
- * has abated. Node is locked.
- *
- * Called with node locked
- */
-void tipc_link_push_packets(struct tipc_link *link)
-{
- struct sk_buff *skb;
- struct tipc_msg *msg;
- u16 seqno = link->snd_nxt;
- u16 ack = mod(link->rcv_nxt - 1);
-
- while (skb_queue_len(&link->transmq) < link->window) {
- skb = __skb_dequeue(&link->backlogq);
- if (!skb)
- break;
- msg = buf_msg(skb);
- link->backlog[msg_importance(msg)].len--;
- msg_set_ack(msg, ack);
- msg_set_seqno(msg, seqno);
- seqno = mod(seqno + 1);
- msg_set_bcast_ack(msg, link->owner->bclink.last_in);
- link->rcv_unacked = 0;
- __skb_queue_tail(&link->transmq, skb);
- tipc_bearer_send(link->owner->net, link->bearer_id,
- skb, link->media_addr);
- }
- link->snd_nxt = seqno;
-}
-
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
struct sk_buff *skb, *_skb;
struct tipc_msg *hdr;
u16 seqno = l->snd_nxt;
u16 ack = l->rcv_nxt - 1;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
while (skb_queue_len(&l->transmq) < l->window) {
skb = skb_peek(&l->backlogq);
@@ -826,96 +788,35 @@
l->backlog[msg_importance(hdr)].len--;
__skb_queue_tail(&l->transmq, skb);
__skb_queue_tail(xmitq, _skb);
- msg_set_ack(hdr, ack);
+ TIPC_SKB_CB(skb)->ackers = l->ackers;
msg_set_seqno(hdr, seqno);
- msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
l->rcv_unacked = 0;
seqno++;
}
l->snd_nxt = seqno;
}
-static void link_retransmit_failure(struct tipc_link *l_ptr,
- struct sk_buff *buf)
+static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
- struct tipc_msg *msg = buf_msg(buf);
- struct net *net = l_ptr->owner->net;
+ struct tipc_msg *hdr = buf_msg(skb);
- pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
-
- if (l_ptr->addr) {
- /* Handle failure on standard link */
- link_print(l_ptr, "Resetting link ");
- pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
- msg_user(msg), msg_type(msg), msg_size(msg),
- msg_errcode(msg));
- pr_info("sqno %u, prev: %x, src: %x\n",
- msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
- } else {
- /* Handle failure on broadcast link */
- struct tipc_node *n_ptr;
- char addr_string[16];
-
- pr_info("Msg seq number: %u, ", msg_seqno(msg));
- pr_cont("Outstanding acks: %lu\n",
- (unsigned long) TIPC_SKB_CB(buf)->handle);
-
- n_ptr = tipc_bclink_retransmit_to(net);
-
- tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_info("Broadcast link info for %s\n", addr_string);
- pr_info("Reception permitted: %d, Acked: %u\n",
- n_ptr->bclink.recv_permitted,
- n_ptr->bclink.acked);
- pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
- n_ptr->bclink.last_in,
- n_ptr->bclink.oos_state,
- n_ptr->bclink.last_sent);
-
- n_ptr->action_flags |= TIPC_BCAST_RESET;
- l_ptr->stale_count = 0;
- }
+ pr_warn("Retransmission failure on link <%s>\n", l->name);
+ link_print(l, "Resetting link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+ pr_info("sqno %u, prev: %x, src: %x\n",
+ msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
-void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
- u32 retransmits)
-{
- struct tipc_msg *msg;
-
- if (!skb)
- return;
-
- msg = buf_msg(skb);
-
- /* Detect repeated retransmit failures */
- if (l_ptr->last_retransm == msg_seqno(msg)) {
- if (++l_ptr->stale_count > 100) {
- link_retransmit_failure(l_ptr, skb);
- return;
- }
- } else {
- l_ptr->last_retransm = msg_seqno(msg);
- l_ptr->stale_count = 1;
- }
-
- skb_queue_walk_from(&l_ptr->transmq, skb) {
- if (!retransmits)
- break;
- msg = buf_msg(skb);
- msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
- msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
- l_ptr->media_addr);
- retransmits--;
- l_ptr->stats.retransmitted++;
- }
-}
-
-static int tipc_link_retransm(struct tipc_link *l, int retransm,
- struct sk_buff_head *xmitq)
+int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
+ struct sk_buff_head *xmitq)
{
struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
struct tipc_msg *hdr;
+ u16 ack = l->rcv_nxt - 1;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
if (!skb)
return 0;
@@ -928,19 +829,25 @@
link_retransmit_failure(l, skb);
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
+
+ /* Move forward to where retransmission should start */
skb_queue_walk(&l->transmq, skb) {
- if (!retransm)
- return 0;
+ if (!less(buf_seqno(skb), from))
+ break;
+ }
+
+ skb_queue_walk_from(&l->transmq, skb) {
+ if (more(buf_seqno(skb), to))
+ break;
hdr = buf_msg(skb);
_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
if (!_skb)
return 0;
hdr = buf_msg(_skb);
- msg_set_ack(hdr, l->rcv_nxt - 1);
- msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
_skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, _skb);
- retransm--;
l->stats.retransmitted++;
}
return 0;
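
/* Sketch (toy integers instead of queued sk_buffs, not part of the patch):
 * how a NACK maps to the new tipc_link_retrans() range. A STATE message
 * carrying ack A and seq_gap G requests seqnos A+1 .. A+G; the walk above
 * skips the already-acked head of the transmit queue and copies until "to". */
#include <stdio.h>

int main(void)
{
    unsigned transmq[] = { 5, 6, 7, 8, 9, 10 };
    unsigned ack = 6, gap = 3;             /* peer misses 7..9 */
    unsigned from = ack + 1, to = ack + gap;

    for (int i = 0; i < 6; i++) {
        if (transmq[i] < from)
            continue;                      /* peer already has it */
        if (transmq[i] > to)
            break;                         /* beyond requested gap */
        printf("retransmit %u\n", transmq[i]);
    }
    return 0;
}
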
@@ -951,11 +858,9 @@
* Consumes buffer if message is of right type
* Node lock must be held
*/
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq)
{
- struct tipc_node *node = link->owner;
-
switch (msg_user(buf_msg(skb))) {
case TIPC_LOW_IMPORTANCE:
case TIPC_MEDIUM_IMPORTANCE:
@@ -965,8 +870,8 @@
skb_queue_tail(inputq, skb);
return true;
case NAME_DISTRIBUTOR:
- node->bclink.recv_permitted = true;
- skb_queue_tail(link->namedq, skb);
+ l->bc_rcvlink->state = LINK_ESTABLISHED;
+ skb_queue_tail(l->namedq, skb);
return true;
case MSG_BUNDLER:
case TUNNEL_PROTOCOL:
@@ -987,7 +892,6 @@
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq)
{
- struct tipc_node *node = l->owner;
struct tipc_msg *hdr = buf_msg(skb);
struct sk_buff **reasm_skb = &l->reasm_buf;
struct sk_buff *iskb;
@@ -1028,13 +932,15 @@
if (tipc_buf_append(reasm_skb, &skb)) {
l->stats.recv_fragmented++;
tipc_data_input(l, skb, inputq);
- } else if (!*reasm_skb) {
+ } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
+ pr_warn_ratelimited("Unable to build fragment list\n");
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
return 0;
} else if (usr == BCAST_PROTOCOL) {
- tipc_link_sync_rcv(node, skb);
- return 0;
+ tipc_bcast_lock(l->net);
+ tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
+ tipc_bcast_unlock(l->net);
}
drop:
kfree_skb(skb);
@@ -1057,12 +963,28 @@
}
/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
+ *
+ * Note that sending of broadcast ack is coordinated among nodes, to reduce
+ * risk of ack storms towards the sender
*/
-void tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
+ if (!l)
+ return 0;
+
+ /* Broadcast ACK must be sent via a unicast link => defer to caller */
+ if (link_is_bc_rcvlink(l)) {
+ if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
+ return 0;
+ l->rcv_unacked = 0;
+ return TIPC_LINK_SND_BC_ACK;
+ }
+
+ /* Unicast ACK */
l->rcv_unacked = 0;
l->stats.sent_acks++;
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+ return 0;
}
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
@@ -1084,6 +1006,9 @@
{
u32 def_cnt = ++l->stats.deferred_recv;
+ if (link_is_bc_rcvlink(l))
+ return;
+
if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}
@@ -1144,12 +1069,11 @@
l->rcv_nxt++;
l->stats.recv_info++;
if (!tipc_data_input(l, skb, l->inputq))
- rc = tipc_link_input(l, skb, l->inputq);
- if (unlikely(rc))
- break;
+ rc |= tipc_link_input(l, skb, l->inputq);
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
- tipc_link_build_ack_msg(l, xmitq);
-
+ rc |= tipc_link_build_ack_msg(l, xmitq);
+ if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
+ break;
} while ((skb = __skb_dequeue(defq)));
return rc;
@@ -1158,45 +1082,6 @@
return rc;
}
-/**
- * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
- *
- * Returns increase in queue length (i.e. 0 or 1)
- */
-u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
-{
- struct sk_buff *skb1;
- u16 seq_no = buf_seqno(skb);
-
- /* Empty queue ? */
- if (skb_queue_empty(list)) {
- __skb_queue_tail(list, skb);
- return 1;
- }
-
- /* Last ? */
- if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
- __skb_queue_tail(list, skb);
- return 1;
- }
-
- /* Locate insertion point in queue, then insert; discard if duplicate */
- skb_queue_walk(list, skb1) {
- u16 curr_seqno = buf_seqno(skb1);
-
- if (seq_no == curr_seqno) {
- kfree_skb(skb);
- return 0;
- }
-
- if (less(seq_no, curr_seqno))
- break;
- }
-
- __skb_queue_before(list, skb1, skb);
- return 1;
-}
-
/*
* Send protocol message to the other endpoint.
*/
@@ -1212,23 +1097,17 @@
skb = __skb_dequeue(&xmitq);
if (!skb)
return;
- tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+ tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
l->rcv_unacked = 0;
- kfree_skb(skb);
}
-/* tipc_link_build_proto_msg: prepare link protocol message for transmission
- */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
u16 rcvgap, int tolerance, int priority,
struct sk_buff_head *xmitq)
{
struct sk_buff *skb = NULL;
struct tipc_msg *hdr = l->pmsg;
- u16 snd_nxt = l->snd_nxt;
- u16 rcv_nxt = l->rcv_nxt;
- u16 rcv_last = rcv_nxt - 1;
- int node_up = l->owner->bclink.recv_permitted;
+ bool node_up = link_is_up(l->bc_rcvlink);
/* Don't send protocol message during reset or link failover */
if (tipc_link_is_blocked(l))
@@ -1236,33 +1115,34 @@
msg_set_type(hdr, mtyp);
msg_set_net_plane(hdr, l->net_plane);
- msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
- msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+ msg_set_next_sent(hdr, l->snd_nxt);
+ msg_set_ack(hdr, l->rcv_nxt - 1);
+ msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
+ msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
msg_set_redundant_link(hdr, node_up);
msg_set_seq_gap(hdr, 0);
/* Compatibility: created msg must not be in sequence with pkt flow */
- msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+ msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
if (mtyp == STATE_MSG) {
if (!tipc_link_is_up(l))
return;
- msg_set_next_sent(hdr, snd_nxt);
/* Override rcvgap if there are packets in deferred queue */
if (!skb_queue_empty(&l->deferdq))
- rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+ rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
if (rcvgap) {
msg_set_seq_gap(hdr, rcvgap);
l->stats.sent_nacks++;
}
- msg_set_ack(hdr, rcv_last);
msg_set_probe(hdr, probe);
if (probe)
l->stats.sent_probes++;
l->stats.sent_states++;
+ l->rcv_unacked = 0;
} else {
/* RESET_MSG or ACTIVATE_MSG */
msg_set_max_pkt(hdr, l->advertised_mtu);
@@ -1354,7 +1234,8 @@
{
struct tipc_msg *hdr = buf_msg(skb);
u16 rcvgap = 0;
- u16 nacked_gap = msg_seq_gap(hdr);
+ u16 ack = msg_ack(hdr);
+ u16 gap = msg_seq_gap(hdr);
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
@@ -1363,7 +1244,7 @@
char *if_name;
int rc = 0;
- if (tipc_link_is_blocked(l))
+ if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
if (link_own_addr(l) > msg_prevnode(hdr))
@@ -1433,11 +1314,11 @@
if (rcvgap || (msg_probe(hdr)))
tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
0, 0, xmitq);
- tipc_link_release_pkts(l, msg_ack(hdr));
+ tipc_link_release_pkts(l, ack);
/* If NACK, retransmit will now start at right position */
- if (nacked_gap) {
- rc = tipc_link_retransm(l, nacked_gap, xmitq);
+ if (gap) {
+ rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
l->stats.recv_nacks++;
}
@@ -1450,6 +1331,188 @@
return rc;
}
+/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
+ */
+static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
+ u16 peers_snd_nxt,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+ struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
+ u16 ack = l->rcv_nxt - 1;
+ u16 gap_to = peers_snd_nxt - 1;
+
+ skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+ 0, l->addr, link_own_addr(l), 0, 0, 0);
+ if (!skb)
+ return false;
+ hdr = buf_msg(skb);
+ msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
+ msg_set_bcast_ack(hdr, ack);
+ msg_set_bcgap_after(hdr, ack);
+ if (dfrd_skb)
+ gap_to = buf_seqno(dfrd_skb) - 1;
+ msg_set_bcgap_to(hdr, gap_to);
+ msg_set_non_seq(hdr, bcast);
+ __skb_queue_tail(xmitq, skb);
+ return true;
+}
+
+/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+ if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
+ return;
+ tipc_link_xmit(l, &list, xmitq);
+}
+
+/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
+ */
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ int mtyp = msg_type(hdr);
+ u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+ if (link_is_up(l))
+ return;
+
+ if (msg_user(hdr) == BCAST_PROTOCOL) {
+ l->rcv_nxt = peers_snd_nxt;
+ l->state = LINK_ESTABLISHED;
+ return;
+ }
+
+ if (l->peer_caps & TIPC_BCAST_SYNCH)
+ return;
+
+ if (msg_peer_node_is_up(hdr))
+ return;
+
+ /* Compatibility: accept older, less safe initial synch data */
+ if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
+ l->rcv_nxt = peers_snd_nxt;
+}
+
+/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
+ */
+void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ struct sk_buff_head *xmitq)
+{
+ u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+ if (!link_is_up(l))
+ return;
+
+ if (!msg_peer_node_is_up(hdr))
+ return;
+
+ l->bc_peer_is_up = true;
+
+ /* Ignore if peers_snd_nxt goes beyond receive window */
+ if (more(peers_snd_nxt, l->rcv_nxt + l->window))
+ return;
+
+ if (!more(peers_snd_nxt, l->rcv_nxt)) {
+ l->nack_state = BC_NACK_SND_CONDITIONAL;
+ return;
+ }
+
+ /* Don't NACK if one was recently sent or peeked */
+ if (l->nack_state == BC_NACK_SND_SUPPRESS) {
+ l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+ return;
+ }
+
+ /* Conditionally delay NACK sending until next synch rcv */
+ if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
+ l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+ if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
+ return;
+ }
+
+ /* Send NACK now but suppress next one */
+ tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
+ l->nack_state = BC_NACK_SND_SUPPRESS;
+}
+
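
The nack_state transitions above form a small suppression machine, presumably to keep a whole cluster from flooding the sender with duplicate NACKs: CONDITIONAL defers the decision to the next synch message, UNCONDITIONAL sends on the next observed gap, and SUPPRESS swallows one send because a NACK (ours, or a peer's, see tipc_link_bc_nack_rcv() further down) just went out. The values are assumed to be the enum this series introduces earlier in link.c:

    /* Assumed definition, introduced elsewhere in this series: */
    enum {
            BC_NACK_SND_CONDITIONAL,    /* defer NACK until next synch rcv */
            BC_NACK_SND_UNCONDITIONAL,  /* send NACK at next observed gap */
            BC_NACK_SND_SUPPRESS,       /* swallow next NACK; one just went out */
    };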
+void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb, *tmp;
+ struct tipc_link *snd_l = l->bc_sndlink;
+
+ if (!link_is_up(l) || !l->bc_peer_is_up)
+ return;
+
+ if (!more(acked, l->acked))
+ return;
+
+ /* Skip over packets peer has already acked */
+ skb_queue_walk(&snd_l->transmq, skb) {
+ if (more(buf_seqno(skb), l->acked))
+ break;
+ }
+
+ /* Update/release the packets peer is acking now */
+ skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
+ if (more(buf_seqno(skb), acked))
+ break;
+ if (!--TIPC_SKB_CB(skb)->ackers) {
+ __skb_unlink(skb, &snd_l->transmq);
+ kfree_skb(skb);
+ }
+ }
+ l->acked = acked;
+ tipc_link_advance_backlog(snd_l, xmitq);
+ if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
+ link_prepare_wakeup(snd_l);
+}
+
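
Release here is per-peer reference counting: TIPC_SKB_CB(skb)->ackers starts at the number of broadcast receivers, and whichever peer acks last frees the buffer. A self-contained userspace model of that rule (names invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt {
            unsigned int seqno;
            int ackers;    /* peers that still have to ack this pkt */
    };

    static void bc_ack(struct pkt *p)
    {
            if (--p->ackers == 0) {
                    printf("releasing pkt %u\n", p->seqno);
                    free(p);
            }
    }

    int main(void)
    {
            struct pkt *p = malloc(sizeof(*p));

            if (!p)
                    return 1;
            p->seqno = 42;
            p->ackers = 3;    /* three peers on the broadcast link */
            bc_ack(p);
            bc_ack(p);
            bc_ack(p);        /* the third ack frees the buffer */
            return 0;
    }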
+/* tipc_link_bc_nack_rcv(): receive broadcast nack message
+ */
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 dnode = msg_destnode(hdr);
+ int mtyp = msg_type(hdr);
+ u16 acked = msg_bcast_ack(hdr);
+ u16 from = acked + 1;
+ u16 to = msg_bcgap_to(hdr);
+ u16 peers_snd_nxt = to + 1;
+ int rc = 0;
+
+ kfree_skb(skb);
+
+ if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
+ return 0;
+
+ if (mtyp != STATE_MSG)
+ return 0;
+
+ if (dnode == link_own_addr(l)) {
+ tipc_link_bc_ack_rcv(l, acked, xmitq);
+ rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
+ l->stats.recv_nacks++;
+ return rc;
+ }
+
+ /* Msg for other node => suppress own NACK at next sync if applicable */
+ if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
+ l->nack_state = BC_NACK_SND_SUPPRESS;
+
+ return 0;
+}
+
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
@@ -1514,7 +1577,7 @@
static void link_print(struct tipc_link *l, const char *str)
{
struct sk_buff *hskb = skb_peek(&l->transmq);
- u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+ u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
u16 tail = l->snd_nxt - 1;
pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
@@ -1738,7 +1801,7 @@
if (tipc_link_is_up(link))
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
goto attr_msg_full;
- if (tipc_link_is_active(link))
+ if (link->active)
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
goto attr_msg_full;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 0201212..66d859b 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -66,7 +66,8 @@
*/
enum {
TIPC_LINK_UP_EVT = 1,
- TIPC_LINK_DOWN_EVT = (1 << 1)
+ TIPC_LINK_DOWN_EVT = (1 << 1),
+ TIPC_LINK_SND_BC_ACK = (1 << 2)
};
/* Starting value for maximum packet size negotiation on unicast links
@@ -110,7 +111,7 @@
* @name: link name character string
* @media_addr: media address to use when sending messages over link
* @timer: link timer
- * @owner: pointer to peer node
+ * @net: pointer to namespace struct
* @refcnt: reference counter for permanent references (owner node & timer)
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
@@ -119,6 +120,7 @@
* @keepalive_intv: link keepalive timer interval
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
+ * @peer_caps: bitmap describing capabilities of peer node
* @silent_intv_cnt: # of timer intervals without any reception from peer
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
@@ -134,6 +136,8 @@
* @snd_nxt: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
* @stale_count: # of identical retransmit requests made by peer
+ * @ackers: # of peers that need to ack each packet before it can be released
+ * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
* @deferred_queue: deferred queue saved OOS b'cast message received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
@@ -143,13 +147,14 @@
* @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
+ * @bc_rcvr: marks that this is a broadcast receiver link
* @stats: collects statistics regarding link activity
*/
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
struct tipc_media_addr *media_addr;
- struct tipc_node *owner;
+ struct net *net;
/* Management and link supervision data */
u32 peer_session;
@@ -159,6 +164,8 @@
unsigned long keepalive_intv;
u32 abort_limit;
u32 state;
+ u16 peer_caps;
+ bool active;
u32 silent_intv_cnt;
struct {
unchar hdr[INT_H_SIZE];
@@ -201,18 +208,35 @@
/* Fragmentation/reassembly */
struct sk_buff *reasm_buf;
+ /* Broadcast */
+ u16 ackers;
+ u16 acked;
+ struct tipc_link *bc_rcvlink;
+ struct tipc_link *bc_sndlink;
+ int nack_state;
+ bool bc_peer_is_up;
+
/* Statistics */
struct tipc_stats stats;
};
-bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
- u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
- struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+ int tolerance, char net_plane, u32 mtu, int priority,
+ int window, u32 session, u32 ownnode, u32 peer,
+ u16 peer_caps,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link *bc_rcvlink,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
struct tipc_link **link);
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
+ int mtu, int window, u16 peer_caps,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link **link);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
-void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
- struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
@@ -223,23 +247,11 @@
bool tipc_link_is_synching(struct tipc_link *l);
bool tipc_link_is_failingover(struct tipc_link *l);
bool tipc_link_is_blocked(struct tipc_link *l);
-int tipc_link_is_active(struct tipc_link *l_ptr);
-void tipc_link_purge_queues(struct tipc_link *l_ptr);
-void tipc_link_purge_backlog(struct tipc_link *l);
+void tipc_link_set_active(struct tipc_link *l, bool active);
void tipc_link_reset(struct tipc_link *l_ptr);
-int __tipc_link_xmit(struct net *net, struct tipc_link *link,
- struct sk_buff_head *list);
int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
struct sk_buff_head *xmitq);
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
- u32 gap, u32 tolerance, u32 priority);
-void tipc_link_push_packets(struct tipc_link *l_ptr);
-u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
-void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
-void tipc_link_retransmit(struct tipc_link *l_ptr,
- struct sk_buff *start, u32 retransmits);
-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
- const struct sk_buff *skb);
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
@@ -249,5 +261,23 @@
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
-
+int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *uc_l,
+ struct sk_buff_head *xmitq);
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *rcv_l,
+ struct sk_buff_head *xmitq);
+int tipc_link_bc_peers(struct tipc_link *l);
+void tipc_link_set_mtu(struct tipc_link *l, int mtu);
+int tipc_link_mtu(struct tipc_link *l);
+void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
+ struct sk_buff_head *xmitq);
+void tipc_link_build_bc_sync_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
+void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ struct sk_buff_head *xmitq);
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 454f5ec..8740930 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -121,7 +121,7 @@
{
struct sk_buff *head = *headbuf;
struct sk_buff *frag = *buf;
- struct sk_buff *tail;
+ struct sk_buff *tail = NULL;
struct tipc_msg *msg;
u32 fragid;
int delta;
@@ -141,9 +141,15 @@
if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
goto err;
head = *headbuf = frag;
- skb_frag_list_init(head);
- TIPC_SKB_CB(head)->tail = NULL;
*buf = NULL;
+ TIPC_SKB_CB(head)->tail = NULL;
+ if (skb_is_nonlinear(head)) {
+ skb_walk_frags(head, tail) {
+ TIPC_SKB_CB(head)->tail = tail;
+ }
+ } else {
+ skb_frag_list_init(head);
+ }
return 0;
}
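
The point of the new branch: the first fragment may itself arrive nonlinear, already carrying a frag list (e.g. after a copy on the receive path), and unconditionally reinitializing that list would lose the chain; walking it to find the present tail keeps later appends correct. skb_walk_frags() is the stock helper:

    /* From <linux/skbuff.h>: visit each skb on the frag_list in order. */
    #define skb_walk_frags(skb, iter) \
            for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)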
@@ -176,7 +182,6 @@
*buf = NULL;
return 0;
err:
- pr_warn_ratelimited("Unable to build fragment list\n");
kfree_skb(*buf);
kfree_skb(*headbuf);
*buf = *headbuf = NULL;
@@ -559,18 +564,22 @@
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
* reassemble the clones into one message
*/
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
- struct sk_buff *skb;
+ struct sk_buff *skb, *_skb;
struct sk_buff *frag = NULL;
struct sk_buff *head = NULL;
- int hdr_sz;
+ int hdr_len;
/* Copy header if single buffer */
if (skb_queue_len(list) == 1) {
skb = skb_peek(list);
- hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
- return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
+ hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+ _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
+ if (!_skb)
+ return false;
+ __skb_queue_tail(rcvq, _skb);
+ return true;
}
/* Clone all fragments and reassemble */
@@ -584,11 +593,12 @@
if (!head)
goto error;
}
- return frag;
+ __skb_queue_tail(rcvq, frag);
+ return true;
error:
pr_warn("Failed do clone local mcast rcv buffer\n");
kfree_skb(head);
- return NULL;
+ return false;
}
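
With the new signature the caller owns both queues and just checks success; a hypothetical call site (queue names invented, consumers as used elsewhere in this series):

    struct sk_buff_head tmpq;

    __skb_queue_head_init(&tmpq);
    if (tipc_msg_reassemble(&pktchain, &tmpq))
            tipc_sk_mcast_rcv(net, &tmpq, inputq);  /* deliver the clone */
    else
            pr_warn("Failed to clone mcast buffer chain\n");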
/* tipc_skb_queue_sorted(): sort pkt into list according to sequence number
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 9f0ef54..55778a0 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -112,6 +112,7 @@
bool wakeup_pending;
u16 chain_sz;
u16 chain_imp;
+ u16 ackers;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
@@ -600,6 +601,11 @@
return msg_bits(m, 4, 16, 0xffff);
}
+static inline u32 msg_bc_snd_nxt(struct tipc_msg *m)
+{
+ return msg_last_bcast(m) + 1;
+}
+
static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 4, 16, 0xffff, n);
@@ -789,7 +795,7 @@
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index e6018b7..c07612b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -102,7 +102,7 @@
if (!oskb)
break;
msg_set_destnode(buf_msg(oskb), dnode);
- tipc_node_xmit_skb(net, oskb, dnode, dnode);
+ tipc_node_xmit_skb(net, oskb, dnode, 0);
}
rcu_read_unlock();
@@ -223,7 +223,7 @@
&tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
rcu_read_unlock();
- tipc_node_xmit(net, &head, dnode, dnode);
+ tipc_node_xmit(net, &head, dnode, 0);
}
static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/net.c b/net/tipc/net.c
index d6d1399..77bf911 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -112,14 +112,11 @@
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
char addr_string[16];
- int res;
tn->own_addr = addr;
tipc_named_reinit(net);
tipc_sk_reinit(net);
- res = tipc_bclink_init(net);
- if (res)
- return res;
+ tipc_bcast_reinit(net);
tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
TIPC_ZONE_SCOPE, 0, tn->own_addr);
@@ -142,7 +139,6 @@
tn->own_addr);
rtnl_lock();
tipc_bearer_stop(net);
- tipc_bclink_stop(net);
tipc_node_stop(net);
rtnl_unlock();
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2670751..20cddec 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -72,7 +72,6 @@
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
-static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(unsigned long data);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
@@ -165,8 +164,10 @@
INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->publ_list);
INIT_LIST_HEAD(&n_ptr->conn_sks);
- skb_queue_head_init(&n_ptr->bclink.namedq);
- __skb_queue_head_init(&n_ptr->bclink.deferdq);
+ skb_queue_head_init(&n_ptr->bc_entry.namedq);
+ skb_queue_head_init(&n_ptr->bc_entry.inputq1);
+ __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
+ skb_queue_head_init(&n_ptr->bc_entry.inputq2);
hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
if (n_ptr->addr < temp_node->addr)
@@ -177,6 +178,18 @@
n_ptr->signature = INVALID_NODE_SIG;
n_ptr->active_links[0] = INVALID_BEARER_ID;
n_ptr->active_links[1] = INVALID_BEARER_ID;
+ if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr,
+ U16_MAX, tipc_bc_sndlink(net)->window,
+ n_ptr->capabilities,
+ &n_ptr->bc_entry.inputq1,
+ &n_ptr->bc_entry.namedq,
+ tipc_bc_sndlink(net),
+ &n_ptr->bc_entry.link)) {
+ pr_warn("Broadcast rcv link creation failed, no memory\n");
+ kfree(n_ptr);
+ n_ptr = NULL;
+ goto exit;
+ }
tipc_node_get(n_ptr);
setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
n_ptr->keepalive_intv = U32_MAX;
@@ -203,6 +216,7 @@
{
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
+ kfree(node->bc_entry.link);
kfree_rcu(node, rcu);
}
@@ -332,6 +346,7 @@
n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+ tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
pr_debug("Established link <%s> on network plane %c\n",
nl->name, nl->net_plane);
@@ -340,8 +355,9 @@
if (!ol) {
*slot0 = bearer_id;
*slot1 = bearer_id;
- tipc_link_build_bcast_sync_msg(nl, xmitq);
- node_established_contact(n);
+ tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+ n->action_flags |= TIPC_NOTIFY_NODE_UP;
+ tipc_bcast_add_peer(n->net, nl, xmitq);
return;
}
@@ -350,8 +366,11 @@
pr_debug("Old link <%s> becomes standby\n", ol->name);
*slot0 = bearer_id;
*slot1 = bearer_id;
+ tipc_link_set_active(nl, true);
+ tipc_link_set_active(ol, false);
} else if (nl->priority == ol->priority) {
- *slot0 = bearer_id;
+ tipc_link_set_active(nl, true);
+ *slot1 = bearer_id;
} else {
pr_debug("New link <%s> is standby\n", nl->name);
}
@@ -428,8 +447,10 @@
tipc_link_build_reset_msg(l, xmitq);
*maddr = &n->links[*bearer_id].maddr;
node_lost_contact(n, &le->inputq);
+ tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
return;
}
+ tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
/* There is still a working link => initiate failover */
tnl = node_active_link(n, 0);
@@ -493,6 +514,7 @@
bool link_up = false;
bool accept_addr = false;
bool reset = true;
+ char *if_name;
*dupl_addr = false;
*respond = false;
@@ -579,9 +601,15 @@
pr_warn("Cannot establish 3rd link to %x\n", n->addr);
goto exit;
}
- if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
- tipc_own_addr(net), onode, &le->maddr,
- &le->inputq, &n->bclink.namedq, &l)) {
+ if_name = strchr(b->name, ':') + 1;
+ if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
+ b->net_plane, b->mtu, b->priority,
+ b->window, mod(tipc_net(net)->random),
+ tipc_own_addr(net), onode,
+ n->capabilities,
+ tipc_bc_sndlink(n->net), n->bc_entry.link,
+ &le->inputq,
+ &n->bc_entry.namedq, &l)) {
*respond = false;
goto exit;
}
@@ -824,58 +852,36 @@
return true;
}
-static void node_established_contact(struct tipc_node *n_ptr)
-{
- tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
- n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
- n_ptr->bclink.oos_state = 0;
- n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
- tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
-}
-
-static void node_lost_contact(struct tipc_node *n_ptr,
+static void node_lost_contact(struct tipc_node *n,
struct sk_buff_head *inputq)
{
char addr_string[16];
struct tipc_sock_conn *conn, *safe;
struct tipc_link *l;
- struct list_head *conns = &n_ptr->conn_sks;
+ struct list_head *conns = &n->conn_sks;
struct sk_buff *skb;
- struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
uint i;
pr_debug("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ tipc_addr_string_fill(addr_string, n->addr));
- /* Flush broadcast link info associated with lost node */
- if (n_ptr->bclink.recv_permitted) {
- __skb_queue_purge(&n_ptr->bclink.deferdq);
-
- if (n_ptr->bclink.reasm_buf) {
- kfree_skb(n_ptr->bclink.reasm_buf);
- n_ptr->bclink.reasm_buf = NULL;
- }
-
- tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
- tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
-
- n_ptr->bclink.recv_permitted = false;
- }
+ /* Clean up broadcast state */
+ tipc_bcast_remove_peer(n->net, n->bc_entry.link);
/* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
- l = n_ptr->links[i].link;
+ l = n->links[i].link;
if (l)
tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
}
/* Notify publications from this node */
- n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+ n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
/* Notify sockets connected to node */
list_for_each_entry_safe(conn, safe, conns, list) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
- SHORT_H_SIZE, 0, tn->own_addr,
+ SHORT_H_SIZE, 0, tipc_own_addr(n->net),
conn->peer_node, conn->port,
conn->peer_port, TIPC_ERR_NO_NODE);
if (likely(skb))
@@ -937,18 +943,13 @@
publ_list = &node->publ_list;
node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
- TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
- TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
- TIPC_BCAST_RESET);
+ TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
spin_unlock_bh(&node->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
tipc_publ_notify(net, publ_list, addr);
- if (flags & TIPC_WAKEUP_BCAST_USERS)
- tipc_bclink_wakeup_users(net);
-
if (flags & TIPC_NOTIFY_NODE_UP)
tipc_named_node_up(net, addr);
@@ -960,11 +961,6 @@
tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
link_id, addr);
- if (flags & TIPC_BCAST_MSG_EVT)
- tipc_bclink_input(net);
-
- if (flags & TIPC_BCAST_RESET)
- tipc_node_reset_links(node);
}
/* Caller should hold node lock for the passed node */
@@ -1080,6 +1076,67 @@
}
/**
+ * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer_id: id of the bearer the message arrived on
+ *
+ * Invoked with no locks held.
+ */
+static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
+{
+ int rc;
+ struct sk_buff_head xmitq;
+ struct tipc_bclink_entry *be;
+ struct tipc_link_entry *le;
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ u32 dnode = msg_destnode(hdr);
+ struct tipc_node *n;
+
+ __skb_queue_head_init(&xmitq);
+
+ /* If NACK for other node, let rcv link for that node peek into it */
+ if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
+ n = tipc_node_find(net, dnode);
+ else
+ n = tipc_node_find(net, msg_prevnode(hdr));
+ if (!n) {
+ kfree_skb(skb);
+ return;
+ }
+ be = &n->bc_entry;
+ le = &n->links[bearer_id];
+
+ rc = tipc_bcast_rcv(net, be->link, skb);
+
+ /* Broadcast link reset may happen at reassembly failure */
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_reset_links(n);
+
+ /* Broadcast ACKs are sent on a unicast link */
+ if (rc & TIPC_LINK_SND_BC_ACK) {
+ tipc_node_lock(n);
+ tipc_link_build_ack_msg(le->link, &xmitq);
+ tipc_node_unlock(n);
+ }
+
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+ /* Deliver. 'arrvq' is under inputq2's lock protection */
+ if (!skb_queue_empty(&be->inputq1)) {
+ spin_lock_bh(&be->inputq2.lock);
+ spin_lock_bh(&be->inputq1.lock);
+ skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+ spin_unlock_bh(&be->inputq1.lock);
+ spin_unlock_bh(&be->inputq2.lock);
+ tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
+ }
+ tipc_node_put(n);
+}
+
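
Broadcast delivery is thus a two-stage pipeline: the receive link fills inputq1; the splice above moves whole messages to arrvq while inputq2's lock is held, so concurrent callers of tipc_sk_mcast_rcv() observe a consistent arrival queue; and sockets finally drain inputq2. The splice itself is the stock constant-time helper:

    /* From <linux/skbuff.h> (signature at the time of this series):
     *
     *   void skb_queue_splice_tail_init(struct sk_buff_head *list,
     *                                   struct sk_buff_head *head);
     *
     * moves every skb from 'list' to the tail of 'head' and leaves
     * 'list' empty, in constant time.
     */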
+/**
* tipc_node_check_state - check and if necessary update node state
* @skb: TIPC packet
* @bearer_id: identity of bearer delivering the packet
@@ -1221,6 +1278,7 @@
int usr = msg_user(hdr);
int bearer_id = b->identity;
struct tipc_link_entry *le;
+ u16 bc_ack = msg_bcast_ack(hdr);
int rc = 0;
__skb_queue_head_init(&xmitq);
@@ -1229,13 +1287,12 @@
if (unlikely(!tipc_msg_validate(skb)))
goto discard;
- /* Handle arrival of a non-unicast link packet */
+ /* Handle arrival of discovery or broadcast packet */
if (unlikely(msg_non_seq(hdr))) {
- if (usr == LINK_CONFIG)
- tipc_disc_rcv(net, skb, b);
+ if (unlikely(usr == LINK_CONFIG))
+ return tipc_disc_rcv(net, skb, b);
else
- tipc_bclink_rcv(net, skb);
- return;
+ return tipc_node_bc_rcv(net, skb, bearer_id);
}
/* Locate neighboring node that sent packet */
@@ -1244,19 +1301,18 @@
goto discard;
le = &n->links[bearer_id];
+ /* Ensure broadcast reception is in synch with peer's send state */
+ if (unlikely(usr == LINK_PROTOCOL))
+ tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
+ else if (unlikely(n->bc_entry.link->acked != bc_ack))
+ tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+
tipc_node_lock(n);
/* Is reception permitted at the moment ? */
if (!tipc_node_filter_pkt(n, hdr))
goto unlock;
- if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
- tipc_bclink_sync_state(n, hdr);
-
- /* Release acked broadcast packets */
- if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
- tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
-
/* Check and if necessary update node state */
if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
rc = tipc_link_rcv(le->link, skb, &xmitq);
@@ -1271,8 +1327,8 @@
if (unlikely(rc & TIPC_LINK_DOWN_EVT))
tipc_node_link_down(n, bearer_id, false);
- if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
- tipc_named_rcv(net, &n->bclink.namedq);
+ if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
+ tipc_named_rcv(net, &n->bc_entry.namedq);
if (!skb_queue_empty(&le->inputq))
tipc_sk_rcv(net, &le->inputq);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 344b3e7..6734562 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -55,36 +55,18 @@
enum {
TIPC_NOTIFY_NODE_DOWN = (1 << 3),
TIPC_NOTIFY_NODE_UP = (1 << 4),
- TIPC_WAKEUP_BCAST_USERS = (1 << 5),
TIPC_NOTIFY_LINK_UP = (1 << 6),
- TIPC_NOTIFY_LINK_DOWN = (1 << 7),
- TIPC_BCAST_MSG_EVT = (1 << 9),
- TIPC_BCAST_RESET = (1 << 10)
+ TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};
-/**
- * struct tipc_node_bclink - TIPC node bclink structure
- * @acked: sequence # of last outbound b'cast message acknowledged by node
- * @last_in: sequence # of last in-sequence b'cast message received from node
- * @last_sent: sequence # of last b'cast message sent by node
- * @oos_state: state tracker for handling OOS b'cast messages
- * @deferred_queue: deferred queue saved OOS b'cast message received from node
- * @reasm_buf: broadcast reassembly queue head from node
- * @inputq_map: bitmap indicating which inqueues should be kicked
- * @recv_permitted: true if node is allowed to receive b'cast messages
+/* Optional capabilities supported by this code version
*/
-struct tipc_node_bclink {
- u32 acked;
- u32 last_in;
- u32 last_sent;
- u32 oos_state;
- u32 deferred_size;
- struct sk_buff_head deferdq;
- struct sk_buff *reasm_buf;
- struct sk_buff_head namedq;
- bool recv_permitted;
+enum {
+ TIPC_BCAST_SYNCH = (1 << 1)
};
+#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
+
struct tipc_link_entry {
struct tipc_link *link;
u32 mtu;
@@ -92,6 +74,14 @@
struct tipc_media_addr maddr;
};
+struct tipc_bclink_entry {
+ struct tipc_link *link;
+ struct sk_buff_head inputq1;
+ struct sk_buff_head arrvq;
+ struct sk_buff_head inputq2;
+ struct sk_buff_head namedq;
+};
+
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
@@ -104,7 +94,6 @@
* @active_links: bearer ids of active links, used as index into links[] array
* @links: array containing references to all links to node
* @action_flags: bit mask of different types of node actions
- * @bclink: broadcast-related info
* @state: connectivity state vs peer node
* @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
@@ -124,8 +113,8 @@
struct hlist_node hash;
int active_links[2];
struct tipc_link_entry links[MAX_BEARERS];
+ struct tipc_bclink_entry bc_entry;
int action_flags;
- struct tipc_node_bclink bclink;
struct list_head list;
int state;
u16 sync_point;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1060d52..552dbab 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -689,13 +689,13 @@
msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
new_mtu:
- mtu = tipc_bclink_get_mtu();
+ mtu = tipc_bcast_get_mtu(net);
rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
if (unlikely(rc < 0))
return rc;
do {
- rc = tipc_bclink_xmit(net, pktchain);
+ rc = tipc_bcast_xmit(net, pktchain);
if (likely(!rc))
return dsz;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9bc0b1e..816914e 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,6 +52,8 @@
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
+#define UDP_MIN_HEADROOM 28
+
static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
[TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
[TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
@@ -153,11 +155,12 @@
struct udp_bearer *ub;
struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
- struct sk_buff *clone;
struct rtable *rt;
- clone = skb_clone(skb, GFP_ATOMIC);
- skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM)
+ pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+
+ skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
ub = rcu_dereference_rtnl(b->media_ptr);
if (!ub) {
err = -ENODEV;
@@ -167,7 +170,7 @@
struct flowi4 fl = {
.daddr = dst->ipv4.s_addr,
.saddr = src->ipv4.s_addr,
- .flowi4_mark = clone->mark,
+ .flowi4_mark = skb->mark,
.flowi4_proto = IPPROTO_UDP
};
rt = ip_route_output_key(net, &fl);
@@ -176,7 +179,7 @@
goto tx_error;
}
ttl = ip4_dst_hoplimit(&rt->dst);
- err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone,
+ err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb,
src->ipv4.s_addr,
dst->ipv4.s_addr, 0, ttl, 0,
src->udp_port, dst->udp_port,
@@ -199,7 +202,7 @@
if (err)
goto tx_error;
ttl = ip6_dst_hoplimit(ndst);
- err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone,
+ err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
ndst->dev, &src->ipv6,
&dst->ipv6, 0, ttl, src->udp_port,
dst->udp_port, false);
@@ -208,7 +211,7 @@
return err;
tx_error:
- kfree_skb(clone);
+ kfree_skb(skb);
return err;
}
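
Dropping the clone saves an allocation and a second buffer on every transmit; instead, headroom is grown in place whenever less than UDP_MIN_HEADROOM is available (28 bytes, matching a 20-byte IPv4 header plus an 8-byte UDP header). One caveat a careful reader will notice: the hunk ignores pskb_expand_head()'s return value; a checked variant would read (sketch, reusing the surrounding function's err and tx_error):

    if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
            err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
            if (err)
                    goto tx_error;
    }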
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 94f6582..aaa0b58 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,9 +326,10 @@
return s;
}
-static inline int unix_writable(struct sock *sk)
+static int unix_writable(const struct sock *sk)
{
- return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ return sk->sk_state != TCP_LISTEN &&
+ (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
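
Two fixes in one: a listening socket is never reported writable, and the pre-existing memory test is kept. The shift encodes a one-quarter threshold, i.e. the socket stays writable while at most a quarter of sk_sndbuf is consumed by queued write memory:

    #include <stdio.h>

    /* Same predicate, lifted to userspace for illustration. */
    static int writable(int wmem_alloc, int sndbuf)
    {
            return (wmem_alloc << 2) <= sndbuf;  /* wmem_alloc <= sndbuf/4 */
    }

    int main(void)
    {
            printf("%d\n", writable(4096, 16384));  /* 1: exactly one quarter */
            printf("%d\n", writable(4097, 16384));  /* 0: just over */
            return 0;
    }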
static void unix_write_space(struct sock *sk)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index df5fc6b..00e8a34 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1948,13 +1948,13 @@
err = misc_register(&vsock_device);
if (err) {
pr_err("Failed to register misc device\n");
- return -ENOENT;
+ goto err_reset_transport;
}
err = proto_register(&vsock_proto, 1); /* we want our slab */
if (err) {
pr_err("Cannot register vsock protocol\n");
- goto err_misc_deregister;
+ goto err_deregister_misc;
}
err = sock_register(&vsock_family_ops);
@@ -1969,8 +1969,9 @@
err_unregister_proto:
proto_unregister(&vsock_proto);
-err_misc_deregister:
+err_deregister_misc:
misc_deregister(&vsock_device);
+err_reset_transport:
transport = NULL;
err_busy:
mutex_unlock(&vsock_register_mutex);
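
The reshuffled ladder fixes two defects in the first path: a misc_register() failure used to return a hard-coded -ENOENT instead of the real error, and it skipped resetting transport, which every later path did. The general unwind rule, sketched with hypothetical step functions:

    int init_all(void)
    {
            int err;

            err = step1();
            if (err)
                    goto out;
            err = step2();
            if (err)
                    goto err_undo_step1;
            err = step3();
            if (err)
                    goto err_undo_step2;
            return 0;

    err_undo_step2:
            undo_step2();
    err_undo_step1:
            undo_step1();
    out:
            return err;
    }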
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 1f63daf..7555cad 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -40,13 +40,11 @@
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
-static void vmci_transport_peer_attach_cb(u32 sub_id,
- const struct vmci_event_data *ed,
- void *client_data);
static void vmci_transport_peer_detach_cb(u32 sub_id,
const struct vmci_event_data *ed,
void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
@@ -75,6 +73,10 @@
struct vmci_transport_packet pkt;
};
+static LIST_HEAD(vmci_transport_cleanup_list);
+static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
+static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
+
static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
@@ -791,44 +793,6 @@
return err;
}
-static void vmci_transport_peer_attach_cb(u32 sub_id,
- const struct vmci_event_data *e_data,
- void *client_data)
-{
- struct sock *sk = client_data;
- const struct vmci_event_payload_qp *e_payload;
- struct vsock_sock *vsk;
-
- e_payload = vmci_event_data_const_payload(e_data);
-
- vsk = vsock_sk(sk);
-
- /* We don't ask for delayed CBs when we subscribe to this event (we
- * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
- * guarantees in that case about what context we might be running in,
- * so it could be BH or process, blockable or non-blockable. So we
- * need to account for all possible contexts here.
- */
- local_bh_disable();
- bh_lock_sock(sk);
-
- /* XXX This is lame, we should provide a way to lookup sockets by
- * qp_handle.
- */
- if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
- e_payload->handle)) {
- /* XXX This doesn't do anything, but in the future we may want
- * to set a flag here to verify the attach really did occur and
- * we weren't just sent a datagram claiming it was.
- */
- goto out;
- }
-
-out:
- bh_unlock_sock(sk);
- local_bh_enable();
-}
-
static void vmci_transport_handle_detach(struct sock *sk)
{
struct vsock_sock *vsk;
@@ -871,28 +835,38 @@
const struct vmci_event_data *e_data,
void *client_data)
{
- struct sock *sk = client_data;
+ struct vmci_transport *trans = client_data;
const struct vmci_event_payload_qp *e_payload;
- struct vsock_sock *vsk;
e_payload = vmci_event_data_const_payload(e_data);
- vsk = vsock_sk(sk);
- if (vmci_handle_is_invalid(e_payload->handle))
- return;
-
- /* Same rules for locking as for peer_attach_cb(). */
- local_bh_disable();
- bh_lock_sock(sk);
/* XXX This is lame, we should provide a way to lookup sockets by
* qp_handle.
*/
- if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
- e_payload->handle))
- vmci_transport_handle_detach(sk);
+ if (vmci_handle_is_invalid(e_payload->handle) ||
+ vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ return;
- bh_unlock_sock(sk);
- local_bh_enable();
+ /* We don't ask for delayed CBs when we subscribe to this event (we
+ * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
+ * guarantees in that case about what context we might be running in,
+ * so it could be BH or process, blockable or non-blockable. So we
+ * need to account for all possible contexts here.
+ */
+ spin_lock_bh(&trans->lock);
+ if (!trans->sk)
+ goto out;
+
+ /* Apart from here, trans->lock is only grabbed as part of sk destruct,
+ * where trans->sk isn't locked.
+ */
+ bh_lock_sock(trans->sk);
+
+ vmci_transport_handle_detach(trans->sk);
+
+ bh_unlock_sock(trans->sk);
+ out:
+ spin_unlock_bh(&trans->lock);
}
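
Two changes interlock here: the callback now receives the transport instead of the sock, and every dereference of trans->sk happens under trans->lock, with the destructor (further down) NULLing the pointer first, so a detach event that fires during teardown degrades to a no-op. A runnable userspace model of that guard (types and names invented):

    #include <pthread.h>
    #include <stdio.h>

    struct trans {
            pthread_mutex_t lock;
            void *sk;    /* NULLed once the socket is being destroyed */
    };

    static void detach_cb(struct trans *t)
    {
            pthread_mutex_lock(&t->lock);
            if (t->sk)
                    printf("handling detach for %p\n", t->sk);
            /* else: teardown already started; nothing to do */
            pthread_mutex_unlock(&t->lock);
    }

    static void destruct(struct trans *t)
    {
            pthread_mutex_lock(&t->lock);
            t->sk = NULL;    /* late callbacks become harmless no-ops */
            pthread_mutex_unlock(&t->lock);
    }

    int main(void)
    {
            int sock;
            struct trans t = { PTHREAD_MUTEX_INITIALIZER, &sock };

            detach_cb(&t);   /* finds a live socket */
            destruct(&t);
            detach_cb(&t);   /* after teardown: no-op */
            return 0;
    }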
static void vmci_transport_qp_resumed_cb(u32 sub_id,
@@ -1181,7 +1155,7 @@
*/
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
- pending, &detach_sub_id);
+ vmci_trans(vpending), &detach_sub_id);
if (err < VMCI_SUCCESS) {
vmci_transport_send_reset(pending, pkt);
err = vmci_transport_error_to_vsock_error(err);
@@ -1321,7 +1295,6 @@
|| vmci_trans(vsk)->qpair
|| vmci_trans(vsk)->produce_size != 0
|| vmci_trans(vsk)->consume_size != 0
- || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
|| vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
skerr = EPROTO;
err = -EINVAL;
@@ -1389,7 +1362,6 @@
struct vsock_sock *vsk;
struct vmci_handle handle;
struct vmci_qp *qpair;
- u32 attach_sub_id;
u32 detach_sub_id;
bool is_local;
u32 flags;
@@ -1399,7 +1371,6 @@
vsk = vsock_sk(sk);
handle = VMCI_INVALID_HANDLE;
- attach_sub_id = VMCI_INVALID_ID;
detach_sub_id = VMCI_INVALID_ID;
/* If we have gotten here then we should be past the point where old
@@ -1444,23 +1415,15 @@
goto destroy;
}
- /* Subscribe to attach and detach events first.
+ /* Subscribe to detach events first.
*
* XXX We attach once for each queue pair created for now so it is easy
* to find the socket (it's provided), but later we should only
* subscribe once and add a way to lookup sockets by queue pair handle.
*/
- err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
- vmci_transport_peer_attach_cb,
- sk, &attach_sub_id);
- if (err < VMCI_SUCCESS) {
- err = vmci_transport_error_to_vsock_error(err);
- goto destroy;
- }
-
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
- sk, &detach_sub_id);
+ vmci_trans(vsk), &detach_sub_id);
if (err < VMCI_SUCCESS) {
err = vmci_transport_error_to_vsock_error(err);
goto destroy;
@@ -1496,7 +1459,6 @@
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
pkt->u.size;
- vmci_trans(vsk)->attach_sub_id = attach_sub_id;
vmci_trans(vsk)->detach_sub_id = detach_sub_id;
vmci_trans(vsk)->notify_ops->process_negotiate(sk);
@@ -1504,9 +1466,6 @@
return 0;
destroy:
- if (attach_sub_id != VMCI_INVALID_ID)
- vmci_event_unsubscribe(attach_sub_id);
-
if (detach_sub_id != VMCI_INVALID_ID)
vmci_event_unsubscribe(detach_sub_id);
@@ -1607,9 +1566,11 @@
vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
vmci_trans(vsk)->qpair = NULL;
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
- vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
- VMCI_INVALID_ID;
+ vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
vmci_trans(vsk)->notify_ops = NULL;
+ INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
+ vmci_trans(vsk)->sk = &vsk->sk;
+ spin_lock_init(&vmci_trans(vsk)->lock);
if (psk) {
vmci_trans(vsk)->queue_pair_size =
vmci_trans(psk)->queue_pair_size;
@@ -1629,29 +1590,57 @@
return 0;
}
+static void vmci_transport_free_resources(struct list_head *transport_list)
+{
+ while (!list_empty(transport_list)) {
+ struct vmci_transport *transport =
+ list_first_entry(transport_list, struct vmci_transport,
+ elem);
+ list_del(&transport->elem);
+
+ if (transport->detach_sub_id != VMCI_INVALID_ID) {
+ vmci_event_unsubscribe(transport->detach_sub_id);
+ transport->detach_sub_id = VMCI_INVALID_ID;
+ }
+
+ if (!vmci_handle_is_invalid(transport->qp_handle)) {
+ vmci_qpair_detach(&transport->qpair);
+ transport->qp_handle = VMCI_INVALID_HANDLE;
+ transport->produce_size = 0;
+ transport->consume_size = 0;
+ }
+
+ kfree(transport);
+ }
+}
+
+static void vmci_transport_cleanup(struct work_struct *work)
+{
+ LIST_HEAD(pending);
+
+ spin_lock_bh(&vmci_transport_cleanup_lock);
+ list_replace_init(&vmci_transport_cleanup_list, &pending);
+ spin_unlock_bh(&vmci_transport_cleanup_lock);
+ vmci_transport_free_resources(&pending);
+}
+
static void vmci_transport_destruct(struct vsock_sock *vsk)
{
- if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
- vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
- vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
- }
-
- if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
- vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
- vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
- }
-
- if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
- vmci_qpair_detach(&vmci_trans(vsk)->qpair);
- vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
- vmci_trans(vsk)->produce_size = 0;
- vmci_trans(vsk)->consume_size = 0;
- }
+ /* Ensure that the detach callback doesn't use the sk/vsk
+ * we are about to destruct.
+ */
+ spin_lock_bh(&vmci_trans(vsk)->lock);
+ vmci_trans(vsk)->sk = NULL;
+ spin_unlock_bh(&vmci_trans(vsk)->lock);
if (vmci_trans(vsk)->notify_ops)
vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
- kfree(vsk->trans);
+ spin_lock_bh(&vmci_transport_cleanup_lock);
+ list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
+ spin_unlock_bh(&vmci_transport_cleanup_lock);
+ schedule_work(&vmci_transport_cleanup_work);
+
vsk->trans = NULL;
}
@@ -2146,6 +2135,9 @@
static void __exit vmci_transport_exit(void)
{
+ cancel_work_sync(&vmci_transport_cleanup_work);
+ vmci_transport_free_resources(&vmci_transport_cleanup_list);
+
if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
if (vmci_datagram_destroy_handle(
vmci_transport_stream_handle) != VMCI_SUCCESS)
@@ -2164,6 +2156,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index ce6c962..2ad46f3 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -119,10 +119,12 @@
u64 queue_pair_size;
u64 queue_pair_min_size;
u64 queue_pair_max_size;
- u32 attach_sub_id;
u32 detach_sub_id;
union vmci_transport_notify notify;
struct vmci_transport_notify_ops *notify_ops;
+ struct list_head elem;
+ struct sock *sk;
+ spinlock_t lock; /* protects sk. */
};
int vmci_transport_register(void);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 4f5543d..da72ed3 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -174,6 +174,16 @@
Most distributions have a CRDA package. So if unsure, say N.
+config CFG80211_CRDA_SUPPORT
+ bool "support CRDA" if CFG80211_INTERNAL_REGDB
+ default y
+ depends on CFG80211
+ help
+ You should enable this option unless you know for sure you have no
+ need for it, for example when using the internal regdb (above).
+
+ If unsure, say Y.
+
config CFG80211_WEXT
bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
depends on CFG80211
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f223026..b091551 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -461,6 +461,9 @@
rdev->wiphy.max_num_csa_counters = 1;
+ rdev->wiphy.max_sched_scan_plans = 1;
+ rdev->wiphy.max_sched_scan_plan_interval = U32_MAX;
+
return &rdev->wiphy;
}
EXPORT_SYMBOL(wiphy_new_nm);
@@ -636,7 +639,7 @@
if (WARN_ON(!sband->n_channels))
return -EINVAL;
/*
- * on 60gHz band, there are no legacy rates, so
+ * on 60GHz band, there are no legacy rates, so
* n_bitrates is 0
*/
if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
diff --git a/net/wireless/core.h b/net/wireless/core.h
index b9d5bc8..a618b4b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -137,6 +137,7 @@
struct list_head list;
struct list_head hidden_list;
struct rb_node rbn;
+ u64 ts_boottime;
unsigned long ts;
unsigned long refcount;
atomic_t hold;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f05ba8b..d693c9d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -479,6 +479,12 @@
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
};
+static const struct nla_policy
+nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = {
+ [NL80211_SCHED_SCAN_PLAN_INTERVAL] = { .type = NLA_U32 },
+ [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -1304,7 +1310,13 @@
nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
rdev->wiphy.max_sched_scan_ie_len) ||
nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
- rdev->wiphy.max_match_sets))
+ rdev->wiphy.max_match_sets) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
+ rdev->wiphy.max_sched_scan_plans) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
+ rdev->wiphy.max_sched_scan_plan_interval) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
+ rdev->wiphy.max_sched_scan_plan_iterations))
goto nla_put_failure;
if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
@@ -4932,56 +4944,6 @@
return err;
}
-static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
- [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
- [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
- [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
- [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 },
- [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 },
- [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 },
- [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 },
-};
-
-static int parse_reg_rule(struct nlattr *tb[],
- struct ieee80211_reg_rule *reg_rule)
-{
- struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
- struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
-
- if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
- return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_START])
- return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_END])
- return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
- return -EINVAL;
- if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
- return -EINVAL;
-
- reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
-
- freq_range->start_freq_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
- freq_range->end_freq_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
- freq_range->max_bandwidth_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
-
- power_rule->max_eirp =
- nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
-
- if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
- power_rule->max_antenna_gain =
- nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
-
- if (tb[NL80211_ATTR_DFS_CAC_TIME])
- reg_rule->dfs_cac_ms =
- nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
-
- return 0;
-}
-
static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
char *data = NULL;
@@ -5613,6 +5575,57 @@
return err;
}
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
+ [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
+ [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
+ [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
+ [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 },
+ [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 },
+ [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 },
+ [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 },
+};
+
+static int parse_reg_rule(struct nlattr *tb[],
+ struct ieee80211_reg_rule *reg_rule)
+{
+ struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
+ struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
+
+ if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_FREQ_RANGE_START])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_FREQ_RANGE_END])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
+ return -EINVAL;
+
+ reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
+
+ freq_range->start_freq_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
+ freq_range->end_freq_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
+ freq_range->max_bandwidth_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
+
+ power_rule->max_eirp =
+ nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
+
+ if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
+ power_rule->max_antenna_gain =
+ nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
+
+ if (tb[NL80211_ATTR_DFS_CAC_TIME])
+ reg_rule->dfs_cac_ms =
+ nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
+
+ return 0;
+}
+
static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
@@ -5689,6 +5702,7 @@
kfree(rd);
return r;
}
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
static int validate_scan_freqs(struct nlattr *freqs)
{
@@ -5974,14 +5988,100 @@
return err;
}
+static int
+nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
+ struct cfg80211_sched_scan_request *request,
+ struct nlattr **attrs)
+{
+ int tmp, err, i = 0;
+ struct nlattr *attr;
+
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) {
+ u32 interval;
+
+ /*
+ * If scan plans are not specified,
+ * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this
+ * case one scan plan will be set with the specified scan
+ * interval and infinite number of iterations.
+ */
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return -EINVAL;
+
+ interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
+ if (!interval)
+ return -EINVAL;
+
+ request->scan_plans[0].interval =
+ DIV_ROUND_UP(interval, MSEC_PER_SEC);
+ if (!request->scan_plans[0].interval)
+ return -EINVAL;
+
+ if (request->scan_plans[0].interval >
+ wiphy->max_sched_scan_plan_interval)
+ request->scan_plans[0].interval =
+ wiphy->max_sched_scan_plan_interval;
+
+ return 0;
+ }
+
+ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) {
+ struct nlattr *plan[NL80211_SCHED_SCAN_PLAN_MAX + 1];
+
+ if (WARN_ON(i >= n_plans))
+ return -EINVAL;
+
+ err = nla_parse(plan, NL80211_SCHED_SCAN_PLAN_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_plan_policy);
+ if (err)
+ return err;
+
+ if (!plan[NL80211_SCHED_SCAN_PLAN_INTERVAL])
+ return -EINVAL;
+
+ request->scan_plans[i].interval =
+ nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]);
+ if (!request->scan_plans[i].interval ||
+ request->scan_plans[i].interval >
+ wiphy->max_sched_scan_plan_interval)
+ return -EINVAL;
+
+ if (plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]) {
+ request->scan_plans[i].iterations =
+ nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]);
+ if (!request->scan_plans[i].iterations ||
+ (request->scan_plans[i].iterations >
+ wiphy->max_sched_scan_plan_iterations))
+ return -EINVAL;
+ } else if (i < n_plans - 1) {
+ /*
+ * All scan plans but the last one must specify
+ * a finite number of iterations
+ */
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ /*
+ * The last scan plan must not specify the number of
+ * iterations, it is supposed to run infinitely
+ */
+ if (request->scan_plans[n_plans - 1].iterations)
+ return -EINVAL;
+
+ return 0;
+}
+
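
Units are the subtle part: the legacy NL80211_ATTR_SCHED_SCAN_INTERVAL is in milliseconds while scan plan intervals count seconds, hence the rounded-up division here and the multiply by 1000 when nl80211_send_wowlan_nd() reports a single-plan request back out. The round trip is lossy for non-whole-second values:

    #include <stdio.h>

    #define MSEC_PER_SEC 1000UL
    /* As in <linux/kernel.h>: */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long legacy_ms = 2500;  /* old-style interval */
            unsigned long plan_s = DIV_ROUND_UP(legacy_ms, MSEC_PER_SEC);

            /* 2500 ms becomes a 3 s plan, reported back as 3000 ms */
            printf("plan: %lu s, reported: %lu ms\n", plan_s, plan_s * 1000);
            return 0;
    }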
static struct cfg80211_sched_scan_request *
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr **attrs)
{
struct cfg80211_sched_scan_request *request;
struct nlattr *attr;
- int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
- u32 interval;
+ int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0;
enum ieee80211_band band;
size_t ie_len;
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
@@ -5990,13 +6090,6 @@
if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
return ERR_PTR(-EINVAL);
- if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
- return ERR_PTR(-EINVAL);
-
- interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
- if (interval == 0)
- return ERR_PTR(-EINVAL);
-
if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
@@ -6060,9 +6153,37 @@
if (ie_len > wiphy->max_sched_scan_ie_len)
return ERR_PTR(-EINVAL);
+ if (attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) {
+ /*
+ * NL80211_ATTR_SCHED_SCAN_INTERVAL must not be specified since
+ * each scan plan already specifies its own interval
+ */
+ if (attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return ERR_PTR(-EINVAL);
+
+ nla_for_each_nested(attr,
+ attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp)
+ n_plans++;
+ } else {
+ /*
+ * The scan interval attribute is kept for backward
+ * compatibility. If no scan plans are specified and sched scan
+ * interval is specified, one scan plan will be set with this
+ * scan interval and infinite number of iterations.
+ */
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return ERR_PTR(-EINVAL);
+
+ n_plans = 1;
+ }
+
+ if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
+ return ERR_PTR(-EINVAL);
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
+ + sizeof(*request->scan_plans) * n_plans
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
@@ -6090,6 +6211,18 @@
}
request->n_match_sets = n_match_sets;
+ if (n_match_sets)
+ request->scan_plans = (void *)(request->match_sets +
+ n_match_sets);
+ else if (request->ie)
+ request->scan_plans = (void *)(request->ie + ie_len);
+ else if (n_ssids)
+ request->scan_plans = (void *)(request->ssids + n_ssids);
+ else
+ request->scan_plans = (void *)(request->channels + n_channels);
+
+ request->n_scan_plans = n_plans;
+
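
The pointer carving above extends the single-allocation idiom already used for ssids, match_sets and channels: one kzalloc() holds the struct plus all of its variable-length arrays, and each array pointer is set to the address just past the previous region. A userspace reduction of the idiom (two arrays, hypothetical types):

    #include <stdio.h>
    #include <stdlib.h>

    struct req {
            int *ssids;
            int *channels;
    };

    int main(void)
    {
            int n_ssids = 2, n_channels = 3;
            struct req *r = calloc(1, sizeof(*r) +
                                   sizeof(int) * (n_ssids + n_channels));

            if (!r)
                    return 1;
            r->ssids = (int *)(r + 1);          /* first trailing array */
            r->channels = r->ssids + n_ssids;   /* follows immediately */
            printf("one allocation, %d + %d trailing slots\n",
                   n_ssids, n_channels);
            free(r);
            return 0;
    }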
i = 0;
if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
@@ -6252,7 +6385,10 @@
request->delay =
nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
- request->interval = interval;
+ err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
+ if (err)
+ goto out_free;
+
request->scan_start = jiffies;
return request;
@@ -6605,6 +6741,11 @@
jiffies_to_msecs(jiffies - intbss->ts)))
goto nla_put_failure;
+ if (intbss->ts_boottime &&
+ nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
+ intbss->ts_boottime))
+ goto nla_put_failure;
+
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8845,7 +8986,7 @@
static int nl80211_send_wowlan_nd(struct sk_buff *msg,
struct cfg80211_sched_scan_request *req)
{
- struct nlattr *nd, *freqs, *matches, *match;
+ struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan;
int i;
if (!req)
@@ -8855,7 +8996,9 @@
if (!nd)
return -ENOBUFS;
- if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval))
+ if (req->n_scan_plans == 1 &&
+ nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL,
+ req->scan_plans[0].interval * 1000))
return -ENOBUFS;
if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
@@ -8882,6 +9025,23 @@
nla_nest_end(msg, matches);
}
+ scan_plans = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_PLANS);
+ if (!scan_plans)
+ return -ENOBUFS;
+
+ for (i = 0; i < req->n_scan_plans; i++) {
+ scan_plan = nla_nest_start(msg, i + 1);
+ if (!scan_plan ||
+ nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
+ req->scan_plans[i].interval) ||
+ (req->scan_plans[i].iterations &&
+ nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS,
+ req->scan_plans[i].iterations)))
+ return -ENOBUFS;
+ nla_nest_end(msg, scan_plan);
+ }
+ nla_nest_end(msg, scan_plans);
+
nla_nest_end(msg, nd);
return 0;
@@ -10737,6 +10897,7 @@
.internal_flags = NL80211_FLAG_NEED_RTNL,
/* can be retrieved by unprivileged users */
},
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
{
.cmd = NL80211_CMD_SET_REG,
.doit = nl80211_set_reg,
@@ -10744,6 +10905,7 @@
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_RTNL,
},
+#endif
{
.cmd = NL80211_CMD_REQ_SET_REG,
.doit = nl80211_req_set_reg,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7258246..2e8d6f3 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -135,10 +135,7 @@
/* Used to track the userspace process controlling the indoor setting */
static u32 reg_is_indoor_portid;
-/* Max number of consecutive attempts to communicate with CRDA */
-#define REG_MAX_CRDA_TIMEOUTS 10
-
-static u32 reg_crda_timeouts;
+static void restore_regulatory_settings(bool reset_user);
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
@@ -226,9 +223,6 @@
static void reg_todo(struct work_struct *work);
static DECLARE_WORK(reg_work, reg_todo);
-static void reg_timeout_work(struct work_struct *work);
-static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
-
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
.n_reg_rules = 8,
@@ -262,7 +256,7 @@
REG_RULE(5745-10, 5825+10, 80, 6, 20,
NL80211_RRF_NO_IR),
- /* IEEE 802.11ad (60gHz), channels 1..3 */
+ /* IEEE 802.11ad (60GHz), channels 1..3 */
REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
}
};
@@ -279,6 +273,9 @@
static void reg_free_request(struct regulatory_request *request)
{
+ if (request == &core_request_world)
+ return;
+
if (request != get_last_request())
kfree(request);
}
@@ -453,68 +450,70 @@
}
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
-struct reg_regdb_search_request {
- char alpha2[2];
+struct reg_regdb_apply_request {
struct list_head list;
+ const struct ieee80211_regdomain *regdom;
};
-static LIST_HEAD(reg_regdb_search_list);
-static DEFINE_MUTEX(reg_regdb_search_mutex);
+static LIST_HEAD(reg_regdb_apply_list);
+static DEFINE_MUTEX(reg_regdb_apply_mutex);
-static void reg_regdb_search(struct work_struct *work)
+static void reg_regdb_apply(struct work_struct *work)
{
- struct reg_regdb_search_request *request;
- const struct ieee80211_regdomain *curdom, *regdom = NULL;
- int i;
+ struct reg_regdb_apply_request *request;
rtnl_lock();
- mutex_lock(&reg_regdb_search_mutex);
- while (!list_empty(&reg_regdb_search_list)) {
- request = list_first_entry(&reg_regdb_search_list,
- struct reg_regdb_search_request,
+ mutex_lock(&reg_regdb_apply_mutex);
+ while (!list_empty(&reg_regdb_apply_list)) {
+ request = list_first_entry(&reg_regdb_apply_list,
+ struct reg_regdb_apply_request,
list);
list_del(&request->list);
- for (i = 0; i < reg_regdb_size; i++) {
- curdom = reg_regdb[i];
-
- if (alpha2_equal(request->alpha2, curdom->alpha2)) {
- regdom = reg_copy_regd(curdom);
- break;
- }
- }
-
+ set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB);
kfree(request);
}
- mutex_unlock(&reg_regdb_search_mutex);
-
- if (!IS_ERR_OR_NULL(regdom))
- set_regdom(regdom, REGD_SOURCE_INTERNAL_DB);
+ mutex_unlock(&reg_regdb_apply_mutex);
rtnl_unlock();
}
-static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+static DECLARE_WORK(reg_regdb_work, reg_regdb_apply);
-static void reg_regdb_query(const char *alpha2)
+static int reg_query_builtin(const char *alpha2)
{
- struct reg_regdb_search_request *request;
+ const struct ieee80211_regdomain *regdom = NULL;
+ struct reg_regdb_apply_request *request;
+ unsigned int i;
- if (!alpha2)
- return;
+ for (i = 0; i < reg_regdb_size; i++) {
+ if (alpha2_equal(alpha2, reg_regdb[i]->alpha2)) {
+ regdom = reg_regdb[i];
+ break;
+ }
+ }
- request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+ if (!regdom)
+ return -ENODATA;
+
+ request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL);
if (!request)
- return;
+ return -ENOMEM;
- memcpy(request->alpha2, alpha2, 2);
+ request->regdom = reg_copy_regd(regdom);
+ if (IS_ERR_OR_NULL(request->regdom)) {
+ kfree(request);
+ return -ENOMEM;
+ }
- mutex_lock(&reg_regdb_search_mutex);
- list_add_tail(&request->list, &reg_regdb_search_list);
- mutex_unlock(&reg_regdb_search_mutex);
+ mutex_lock(&reg_regdb_apply_mutex);
+ list_add_tail(&request->list, &reg_regdb_apply_list);
+ mutex_unlock(&reg_regdb_apply_mutex);
schedule_work(&reg_regdb_work);
+
+ return 0;
}
/* Feel free to add any other sanity checks here */
@@ -525,9 +524,45 @@
}
#else
static inline void reg_regdb_size_check(void) {}
-static inline void reg_regdb_query(const char *alpha2) {}
+static inline int reg_query_builtin(const char *alpha2)
+{
+ return -ENODATA;
+}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+/* Max number of consecutive attempts to communicate with CRDA */
+#define REG_MAX_CRDA_TIMEOUTS 10
+
+static u32 reg_crda_timeouts;
+
+static void crda_timeout_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
+
+static void crda_timeout_work(struct work_struct *work)
+{
+ REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+ rtnl_lock();
+ reg_crda_timeouts++;
+ restore_regulatory_settings(true);
+ rtnl_unlock();
+}
+
+static void cancel_crda_timeout(void)
+{
+ cancel_delayed_work(&crda_timeout);
+}
+
+static void cancel_crda_timeout_sync(void)
+{
+ cancel_delayed_work_sync(&crda_timeout);
+}
+
+static void reset_crda_timeouts(void)
+{
+ reg_crda_timeouts = 0;
+}
+
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace.
@@ -536,13 +571,11 @@
{
char country[12];
char *env[] = { country, NULL };
+ int ret;
snprintf(country, sizeof(country), "COUNTRY=%c%c",
alpha2[0], alpha2[1]);
- /* query internal regulatory database (if it exists) */
- reg_regdb_query(alpha2);
-
if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
return -EINVAL;
@@ -554,18 +587,34 @@
else
pr_debug("Calling CRDA to update world regulatory domain\n");
- return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
-}
-
-static enum reg_request_treatment
-reg_call_crda(struct regulatory_request *request)
-{
- if (call_crda(request->alpha2))
- return REG_REQ_IGNORE;
+ ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+ if (ret)
+ return ret;
queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, msecs_to_jiffies(3142));
- return REG_REQ_OK;
+ &crda_timeout, msecs_to_jiffies(3142));
+ return 0;
+}
+#else
+static inline void cancel_crda_timeout(void) {}
+static inline void cancel_crda_timeout_sync(void) {}
+static inline void reset_crda_timeouts(void) {}
+static inline int call_crda(const char *alpha2)
+{
+ return -ENODATA;
+}
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
+
+static bool reg_query_database(struct regulatory_request *request)
+{
+ /* query internal regulatory database (if it exists) */
+ if (reg_query_builtin(request->alpha2) == 0)
+ return true;
+
+ if (call_crda(request->alpha2) == 0)
+ return true;
+
+ return false;
}
bool reg_is_valid_request(const char *alpha2)
@@ -1081,11 +1130,11 @@
}
EXPORT_SYMBOL(reg_initiator_name);
-#ifdef CONFIG_CFG80211_REG_DEBUG
static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
struct ieee80211_channel *chan,
const struct ieee80211_reg_rule *reg_rule)
{
+#ifdef CONFIG_CFG80211_REG_DEBUG
const struct ieee80211_power_rule *power_rule;
const struct ieee80211_freq_range *freq_range;
char max_antenna_gain[32], bw[32];
@@ -1096,7 +1145,7 @@
if (!power_rule->max_antenna_gain)
snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
else
- snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d",
+ snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
power_rule->max_antenna_gain);
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
@@ -1110,19 +1159,12 @@
REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
chan->center_freq);
- REG_DBG_PRINT("%d KHz - %d KHz @ %s), (%s mBi, %d mBm)\n",
+ REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
freq_range->start_freq_khz, freq_range->end_freq_khz,
bw, max_antenna_gain,
power_rule->max_eirp);
-}
-#else
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
- struct ieee80211_channel *chan,
- const struct ieee80211_reg_rule *reg_rule)
-{
- return;
-}
#endif
+}
/*
* Note that right now we assume the desired channel bandwidth
@@ -1311,7 +1353,8 @@
return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS);
}
#else
-static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+static enum reg_request_treatment
+reg_ignore_cell_hint(struct regulatory_request *pending_request)
{
return REG_REQ_IGNORE;
}
@@ -1846,7 +1889,7 @@
need_more_processing = true;
spin_unlock(&reg_requests_lock);
- cancel_delayed_work(&reg_timeout);
+ cancel_crda_timeout();
if (need_more_processing)
schedule_work(&reg_work);
@@ -1858,19 +1901,18 @@
*
* The wireless subsystem can use this function to process
* a regulatory request issued by the regulatory core.
- *
- * Returns one of the different reg request treatment values.
*/
static enum reg_request_treatment
reg_process_hint_core(struct regulatory_request *core_request)
{
+ if (reg_query_database(core_request)) {
+ core_request->intersect = false;
+ core_request->processed = false;
+ reg_update_last_request(core_request);
+ return REG_REQ_OK;
+ }
- core_request->intersect = false;
- core_request->processed = false;
-
- reg_update_last_request(core_request);
-
- return reg_call_crda(core_request);
+ return REG_REQ_IGNORE;
}
static enum reg_request_treatment
@@ -1915,8 +1957,6 @@
*
* The wireless subsystem can use this function to process
* a regulatory request initiated by userspace.
- *
- * Returns one of the different reg request treatment values.
*/
static enum reg_request_treatment
reg_process_hint_user(struct regulatory_request *user_request)
@@ -1925,20 +1965,20 @@
treatment = __reg_process_hint_user(user_request);
if (treatment == REG_REQ_IGNORE ||
- treatment == REG_REQ_ALREADY_SET) {
- reg_free_request(user_request);
- return treatment;
- }
+ treatment == REG_REQ_ALREADY_SET)
+ return REG_REQ_IGNORE;
user_request->intersect = treatment == REG_REQ_INTERSECT;
user_request->processed = false;
- reg_update_last_request(user_request);
+ if (reg_query_database(user_request)) {
+ reg_update_last_request(user_request);
+ user_alpha2[0] = user_request->alpha2[0];
+ user_alpha2[1] = user_request->alpha2[1];
+ return REG_REQ_OK;
+ }
- user_alpha2[0] = user_request->alpha2[0];
- user_alpha2[1] = user_request->alpha2[1];
-
- return reg_call_crda(user_request);
+ return REG_REQ_IGNORE;
}
static enum reg_request_treatment
@@ -1986,16 +2026,12 @@
case REG_REQ_OK:
break;
case REG_REQ_IGNORE:
- reg_free_request(driver_request);
- return treatment;
+ return REG_REQ_IGNORE;
case REG_REQ_INTERSECT:
- /* fall through */
case REG_REQ_ALREADY_SET:
regd = reg_copy_regd(get_cfg80211_regdom());
- if (IS_ERR(regd)) {
- reg_free_request(driver_request);
+ if (IS_ERR(regd))
return REG_REQ_IGNORE;
- }
tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, regd);
@@ -2006,8 +2042,6 @@
driver_request->intersect = treatment == REG_REQ_INTERSECT;
driver_request->processed = false;
- reg_update_last_request(driver_request);
-
/*
* Since CRDA will not be called in this case as we already
* have applied the requested regulatory domain before we just
@@ -2015,11 +2049,17 @@
*/
if (treatment == REG_REQ_ALREADY_SET) {
nl80211_send_reg_change_event(driver_request);
+ reg_update_last_request(driver_request);
reg_set_request_processed();
- return treatment;
+ return REG_REQ_ALREADY_SET;
}
- return reg_call_crda(driver_request);
+ if (reg_query_database(driver_request)) {
+ reg_update_last_request(driver_request);
+ return REG_REQ_OK;
+ }
+
+ return REG_REQ_IGNORE;
}
static enum reg_request_treatment
@@ -2085,12 +2125,11 @@
case REG_REQ_OK:
break;
case REG_REQ_IGNORE:
- /* fall through */
+ return REG_REQ_IGNORE;
case REG_REQ_ALREADY_SET:
reg_free_request(country_ie_request);
- return treatment;
+ return REG_REQ_ALREADY_SET;
case REG_REQ_INTERSECT:
- reg_free_request(country_ie_request);
/*
* This doesn't happen yet, not sure we
* ever want to support it for this case.
@@ -2102,9 +2141,12 @@
country_ie_request->intersect = false;
country_ie_request->processed = false;
- reg_update_last_request(country_ie_request);
+ if (reg_query_database(country_ie_request)) {
+ reg_update_last_request(country_ie_request);
+ return REG_REQ_OK;
+ }
- return reg_call_crda(country_ie_request);
+ return REG_REQ_IGNORE;
}
/* This processes *all* regulatory hints */
@@ -2118,11 +2160,11 @@
switch (reg_request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
- reg_process_hint_core(reg_request);
- return;
+ treatment = reg_process_hint_core(reg_request);
+ break;
case NL80211_REGDOM_SET_BY_USER:
- reg_process_hint_user(reg_request);
- return;
+ treatment = reg_process_hint_user(reg_request);
+ break;
case NL80211_REGDOM_SET_BY_DRIVER:
if (!wiphy)
goto out_free;
@@ -2138,6 +2180,12 @@
goto out_free;
}
+ if (treatment == REG_REQ_IGNORE)
+ goto out_free;
+
+ WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET,
+ "unexpected treatment value %d\n", treatment);
+
/* This is required so that the orig_* parameters are saved.
* NOTE: treatment must be set for any case that reaches here!
*/
@@ -2345,7 +2393,7 @@
request->user_reg_hint_type = user_reg_hint_type;
/* Allow calling CRDA again */
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
queue_regulatory_request(request);
@@ -2417,7 +2465,7 @@
request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
/* Allow calling CRDA again */
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
queue_regulatory_request(request);
@@ -2473,7 +2521,7 @@
request->country_ie_env = env;
/* Allow calling CRDA again */
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
queue_regulatory_request(request);
request = NULL;
@@ -2874,11 +2922,8 @@
}
request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
- if (!request_wiphy) {
- queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, 0);
+ if (!request_wiphy)
return -ENODEV;
- }
if (!driver_request->intersect) {
if (request_wiphy->regd)
@@ -2935,11 +2980,8 @@
}
request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
- if (!request_wiphy) {
- queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, 0);
+ if (!request_wiphy)
return -ENODEV;
- }
if (country_ie_request->intersect)
return -EINVAL;
@@ -2966,7 +3008,7 @@
}
if (regd_src == REGD_SOURCE_CRDA)
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
lr = get_last_request();
@@ -3123,15 +3165,6 @@
lr->country_ie_env = ENVIRON_ANY;
}
-static void reg_timeout_work(struct work_struct *work)
-{
- REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
- rtnl_lock();
- reg_crda_timeouts++;
- restore_regulatory_settings(true);
- rtnl_unlock();
-}
-
/*
* See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for
* UNII band definitions
@@ -3217,7 +3250,7 @@
struct reg_beacon *reg_beacon, *btmp;
cancel_work_sync(&reg_work);
- cancel_delayed_work_sync(&reg_timeout);
+ cancel_crda_timeout_sync();
cancel_delayed_work_sync(&reg_check_chans);
/* Lock to suppress warnings */
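Note on the reg.c rework above: the unconditional CRDA uevent is replaced by a
two-stage lookup, and the CRDA helpers compile down to static inline stubs when
CONFIG_CFG80211_CRDA_SUPPORT is disabled, so callers need no #ifdefs. A minimal
sketch of the resulting lookup order, condensed from the hunks above (names as
in the patch):

	static bool reg_query_database(struct regulatory_request *request)
	{
		/* 1) builtin table; only with CONFIG_CFG80211_INTERNAL_REGDB,
		 *    otherwise the stub returns -ENODATA and we fall through */
		if (reg_query_builtin(request->alpha2) == 0)
			return true;

		/* 2) userspace CRDA via uevent; only with
		 *    CONFIG_CFG80211_CRDA_SUPPORT, the stub again -ENODATA */
		if (call_crda(request->alpha2) == 0)
			return true;

		return false;
	}

Each hint path (core, user, driver, country IE) now calls this and treats a
false return as REG_REQ_IGNORE.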
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 3a50aa2..14d5369e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -266,8 +266,7 @@
spin_lock_bh(&rdev->bss_lock);
__cfg80211_bss_expire(rdev, request->scan_start);
spin_unlock_bh(&rdev->bss_lock);
- request->scan_start =
- jiffies + msecs_to_jiffies(request->interval);
+ request->scan_start = jiffies;
}
nl80211_send_sched_scan_results(rdev, request->dev);
}
@@ -839,6 +838,7 @@
found->pub.signal = tmp->pub.signal;
found->pub.capability = tmp->pub.capability;
found->ts = tmp->ts;
+ found->ts_boottime = tmp->ts_boottime;
} else {
struct cfg80211_internal_bss *new;
struct cfg80211_internal_bss *hidden;
@@ -938,14 +938,13 @@
}
/* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss*
-cfg80211_inform_bss_width(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- enum cfg80211_bss_frame_type ftype,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ gfp_t gfp)
{
struct cfg80211_bss_ies *ies;
struct ieee80211_channel *channel;
@@ -957,19 +956,21 @@
return NULL;
if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (signal < 0 || signal > 100)))
+ (data->signal < 0 || data->signal > 100)))
return NULL;
- channel = cfg80211_get_bss_channel(wiphy, ie, ielen, rx_channel);
+ channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
if (!channel)
return NULL;
memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
tmp.pub.channel = channel;
- tmp.pub.scan_width = scan_width;
- tmp.pub.signal = signal;
+ tmp.pub.scan_width = data->scan_width;
+ tmp.pub.signal = data->signal;
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
+ tmp.ts_boottime = data->boottime_ns;
+
/*
* If we do not know here whether the IEs are from a Beacon or Probe
* Response frame, we need to pick one of the options and only use it
@@ -999,7 +1000,7 @@
}
rcu_assign_pointer(tmp.pub.ies, ies);
- signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+ signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
wiphy->max_adj_channel_rssi_comp;
res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
if (!res)
@@ -1019,15 +1020,15 @@
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_width);
+EXPORT_SYMBOL(cfg80211_inform_bss_data);
-/* Returned bss is reference counted and must be cleaned up appropriately. */
+/* cfg80211_inform_bss_width_frame helper */
struct cfg80211_bss *
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -1040,8 +1041,7 @@
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
- trace_cfg80211_inform_bss_width_frame(wiphy, rx_channel, scan_width, mgmt,
- len, signal);
+ trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
if (WARN_ON(!mgmt))
return NULL;
@@ -1050,14 +1050,14 @@
return NULL;
if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (signal < 0 || signal > 100)))
+ (data->signal < 0 || data->signal > 100)))
return NULL;
if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
return NULL;
channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
- ielen, rx_channel);
+ ielen, data->chan);
if (!channel)
return NULL;
@@ -1077,12 +1077,13 @@
memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
tmp.pub.channel = channel;
- tmp.pub.scan_width = scan_width;
- tmp.pub.signal = signal;
+ tmp.pub.scan_width = data->scan_width;
+ tmp.pub.signal = data->signal;
tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
+ tmp.ts_boottime = data->boottime_ns;
- signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+ signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
wiphy->max_adj_channel_rssi_comp;
res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
if (!res)
@@ -1102,7 +1103,7 @@
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
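The scan.c conversion above moves the per-report parameters into struct
cfg80211_inform_bss. A hypothetical driver-side caller of the new API, for
illustration only (the struct fields are the ones used above; rx_channel,
rx_signal, mgmt and len stand in for driver state):

	struct cfg80211_inform_bss data = {
		.chan = rx_channel,		/* channel the frame was received on */
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = rx_signal,		/* mBm or 0..100, per wiphy->signal_type */
		.boottime_ns = ktime_get_boot_ns(), /* feeds the new ts_boottime */
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* result is reference counted */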
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a808279..0c392d3 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2670,30 +2670,30 @@
__entry->privacy)
);
-TRACE_EVENT(cfg80211_inform_bss_width_frame,
- TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
- enum nl80211_bss_scan_width scan_width,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal),
- TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
+TRACE_EVENT(cfg80211_inform_bss_frame,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len),
+ TP_ARGS(wiphy, data, mgmt, len),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_ENTRY
__field(enum nl80211_bss_scan_width, scan_width)
__dynamic_array(u8, mgmt, len)
__field(s32, signal)
+ __field(u64, ts_boottime)
),
TP_fast_assign(
WIPHY_ASSIGN;
- CHAN_ASSIGN(channel);
- __entry->scan_width = scan_width;
+ CHAN_ASSIGN(data->chan);
+ __entry->scan_width = data->scan_width;
if (mgmt)
memcpy(__get_dynamic_array(mgmt), mgmt, len);
- __entry->signal = signal;
+ __entry->signal = data->signal;
+ __entry->ts_boottime = data->boottime_ns;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d, tsb:%llu",
WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
- __entry->signal)
+ __entry->signal, (unsigned long long)__entry->ts_boottime)
);
DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a8de9e3..24e06a2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1928,8 +1928,10 @@
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
- if (!lt && !rp && !re)
+ if (!lt && !rp && !re && !et && !rt)
return err;
/* pedantic mode - thou shalt sayeth replaceth */
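With XFRMA_ETIMER_THRESH and XFRMA_REPLAY_THRESH added to this emptiness check,
an update that carries only the event timer or replay threshold attribute is no
longer returned early and is actually applied.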
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 63e7d50..b305145 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -13,6 +13,7 @@
hostprogs-y += tracex4
hostprogs-y += tracex5
hostprogs-y += tracex6
+hostprogs-y += trace_output
hostprogs-y += lathist
test_verifier-objs := test_verifier.o libbpf.o
@@ -27,6 +28,7 @@
tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
+trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
lathist-objs := bpf_load.o libbpf.o lathist_user.o
# Tell kbuild to always build the programs
@@ -40,6 +42,7 @@
always += tracex4_kern.o
always += tracex5_kern.o
always += tracex6_kern.o
+always += trace_output_kern.o
always += tcbpf1_kern.o
always += lathist_kern.o
@@ -55,6 +58,7 @@
HOSTLOADLIBES_tracex4 += -lelf -lrt
HOSTLOADLIBES_tracex5 += -lelf
HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_trace_output += -lelf -lrt
HOSTLOADLIBES_lathist += -lelf
# point this to your LLVM backend with bpf support
@@ -64,3 +68,6 @@
clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+ clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+ -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 21aa1b4..b35c21e 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -37,6 +37,8 @@
(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
(void *) BPF_FUNC_redirect;
+static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) =
+ (void *) BPF_FUNC_perf_event_output;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
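The bpf_perf_event_output() wrapper declared here is exercised by the new
trace_output sample below; like the other wrappers in this header, it simply
casts the helper's function ID to a callable so clang emits a BPF call
instruction.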
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
new file mode 100644
index 0000000..8d8d1ec
--- /dev/null
+++ b/samples/bpf/trace_output_kern.c
@@ -0,0 +1,31 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = 2,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ struct S {
+ u64 pid;
+ u64 cookie;
+ } data;
+
+ memset(&data, 0, sizeof(data));
+ data.pid = bpf_get_current_pid_tgid();
+ data.cookie = 0x12345678;
+
+ bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
new file mode 100644
index 0000000..661a7d0
--- /dev/null
+++ b/samples/bpf/trace_output_user.c
@@ -0,0 +1,196 @@
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <signal.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+static int pmu_fd;
+
+int page_size;
+int page_cnt = 8;
+volatile struct perf_event_mmap_page *header;
+
+typedef void (*print_fn)(void *data, int size);
+
+static int perf_event_mmap(int fd)
+{
+ void *base;
+ int mmap_size;
+
+ page_size = getpagesize();
+ mmap_size = page_size * (page_cnt + 1);
+
+ base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (base == MAP_FAILED) {
+ printf("mmap err\n");
+ return -1;
+ }
+
+ header = base;
+ return 0;
+}
+
+static int perf_event_poll(int fd)
+{
+ struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+ return poll(&pfd, 1, 1000);
+}
+
+struct perf_event_sample {
+ struct perf_event_header header;
+ __u32 size;
+ char data[];
+};
+
+void perf_event_read(print_fn fn)
+{
+ __u64 data_tail = header->data_tail;
+ __u64 data_head = header->data_head;
+ __u64 buffer_size = page_cnt * page_size;
+ void *base, *begin, *end;
+ char buf[256];
+
+ asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
+ if (data_head == data_tail)
+ return;
+
+ base = ((char *)header) + page_size;
+
+ begin = base + data_tail % buffer_size;
+ end = base + data_head % buffer_size;
+
+ while (begin != end) {
+ struct perf_event_sample *e;
+
+ e = begin;
+ if (begin + e->header.size > base + buffer_size) {
+ long len = base + buffer_size - begin;
+
+ assert(len < e->header.size);
+ memcpy(buf, begin, len);
+ memcpy(buf + len, base, e->header.size - len);
+ e = (void *) buf;
+ begin = base + e->header.size - len;
+ } else if (begin + e->header.size == base + buffer_size) {
+ begin = base;
+ } else {
+ begin += e->header.size;
+ }
+
+ if (e->header.type == PERF_RECORD_SAMPLE) {
+ fn(e->data, e->size);
+ } else if (e->header.type == PERF_RECORD_LOST) {
+ struct {
+ struct perf_event_header header;
+ __u64 id;
+ __u64 lost;
+ } *lost = (void *) e;
+ printf("lost %lld events\n", lost->lost);
+ } else {
+ printf("unknown event type=%d size=%d\n",
+ e->header.type, e->header.size);
+ }
+ }
+
+ __sync_synchronize(); /* smp_mb() */
+ header->data_tail = data_head;
+}
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static __u64 start_time;
+
+#define MAX_CNT 100000ll
+
+static void print_bpf_output(void *data, int size)
+{
+ static __u64 cnt;
+ struct {
+ __u64 pid;
+ __u64 cookie;
+ } *e = data;
+
+ if (e->cookie != 0x12345678) {
+ printf("BUG pid %llx cookie %llx sized %d\n",
+ e->pid, e->cookie, size);
+ kill(0, SIGINT);
+ }
+
+ cnt++;
+
+ if (cnt == MAX_CNT) {
+ printf("recv %lld events per sec\n",
+ MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+ kill(0, SIGINT);
+ }
+}
+
+static void test_bpf_perf_event(void)
+{
+ struct perf_event_attr attr = {
+ .sample_type = PERF_SAMPLE_RAW,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_BPF_OUTPUT,
+ };
+ int key = 0;
+
+ pmu_fd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+
+ assert(pmu_fd >= 0);
+ assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ test_bpf_perf_event();
+
+ if (perf_event_mmap(pmu_fd) < 0)
+ return 1;
+
+ f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
+ (void) f;
+
+ start_time = time_get_ns();
+ for (;;) {
+ perf_event_poll(pmu_fd);
+ perf_event_read(print_bpf_output);
+ }
+
+ return 0;
+}