Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 88bb71b..9eb1ba5 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -177,18 +177,6 @@
A convenience function to print out the PHY status neatly.
- int phy_clear_interrupt(struct phy_device *phydev);
- int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
-
- Clear the PHY's interrupt, and configure which ones are allowed,
- respectively. Currently only supports all on, or all off.
-
- int phy_enable_interrupts(struct phy_device *phydev);
- int phy_disable_interrupts(struct phy_device *phydev);
-
- Functions which enable/disable PHY interrupts, clearing them
- before and after, respectively.
-
int phy_start_interrupts(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
@@ -213,12 +201,6 @@
Fills the phydev structure with up-to-date information about the current
settings in the PHY.
- void phy_sanitize_settings(struct phy_device *phydev)
-
- Resolves differences between currently desired settings, and
- supported settings for the given PHY device. Does not make
- the changes in the hardware, though.
-
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
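
With the internal helpers dropped from the table, the
phy_start_interrupts()/phy_stop_interrupts() pair is the documented entry
point. The two are normally paired with a MAC driver's open/stop path; a
minimal sketch (driver structure and field names are hypothetical):

    static int foo_open(struct net_device *ndev)
    {
            struct foo_priv *priv = netdev_priv(ndev);

            /* priv->phydev->irq must already hold a valid IRQ number */
            return phy_start_interrupts(priv->phydev);
    }

    static int foo_stop(struct net_device *ndev)
    {
            struct foo_priv *priv = netdev_priv(ndev);

            return phy_stop_interrupts(priv->phydev);
    }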
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 80f9f36..97c5898 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1736,9 +1736,10 @@
eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
if (readl(&eprom->magic) != ENI155_MAGIC) {
printk("\n");
- printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
- "magic - expected 0x%x, got 0x%x\n",dev->number,
- ENI155_MAGIC,(unsigned) readl(&eprom->magic));
+ printk(KERN_ERR DEV_LABEL
+ "(itf %d): bad magic - expected 0x%x, got 0x%x\n",
+ dev->number, ENI155_MAGIC,
+ (unsigned)readl(&eprom->magic));
error = -EINVAL;
goto unmap;
}
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 1a9332e..9a676ee 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -1,6 +1,7 @@
SOLOS_ATTR_RO(DriverVersion)
SOLOS_ATTR_RO(APIVersion)
SOLOS_ATTR_RO(FirmwareVersion)
+SOLOS_ATTR_RO(Version)
// SOLOS_ATTR_RO(DspVersion)
// SOLOS_ATTR_RO(CommonHandshake)
SOLOS_ATTR_RO(Connected)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f46138a..2e08c99 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1161,6 +1161,14 @@
dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
major_ver, minor_ver, fpga_ver);
+ if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
+ db_fpga_upgrade || db_firmware_upgrade)) {
+ dev_warn(&dev->dev,
+ "FPGA too old; cannot upgrade flash. Use JTAG.\n");
+ fpga_upgrade = firmware_upgrade = 0;
+ db_fpga_upgrade = db_firmware_upgrade = 0;
+ }
+
if (card->fpga_version >= DMA_SUPPORTED){
card->using_dma = 1;
} else {
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 210338e..81270d2 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,48 +31,6 @@
#include <linux/connector.h>
#include <linux/delay.h>
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
- struct cn_queue_dev *dev;
-
- dev = container_of(work, struct cn_queue_dev, wq_creation);
-
- dev->cn_queue = create_singlethread_workqueue(dev->name);
- /* If we fail, we will use keventd for all following connector jobs */
- WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
- struct cn_queue_dev *pdev = cbq->pdev;
-
- if (likely(pdev->cn_queue))
- return queue_work(pdev->cn_queue, work);
-
- /* Don't create the connector workqueue twice */
- if (atomic_inc_return(&pdev->wq_requested) == 1)
- schedule_work(&pdev->wq_creation);
- else
- atomic_dec(&pdev->wq_requested);
-
- return schedule_work(work);
-}
-
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
- /* The first jobs have been sent to kevent, flush them too */
- flush_scheduled_work();
- if (cbq->pdev->cn_queue)
- flush_workqueue(cbq->pdev->cn_queue);
-
+ flush_workqueue(cbq->pdev->cn_queue);
kfree(cbq);
}
@@ -193,11 +147,14 @@
atomic_set(&dev->refcnt, 0);
INIT_LIST_HEAD(&dev->queue_list);
spin_lock_init(&dev->queue_lock);
- init_waitqueue_head(&dev->wq_created);
dev->nls = nls;
- INIT_WORK(&dev->wq_creation, cn_queue_create);
+ dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+ if (!dev->cn_queue) {
+ kfree(dev);
+ return NULL;
+ }
return dev;
}
@@ -205,25 +162,9 @@
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
- long timeout;
- DEFINE_WAIT(wait);
- /* Flush the first pending jobs queued on kevent */
- flush_scheduled_work();
-
- /* If the connector workqueue creation is still pending, wait for it */
- prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
- timeout = schedule_timeout(HZ * 2);
- if (!timeout && !dev->cn_queue)
- WARN_ON(1);
- }
- finish_wait(&dev->wq_created, &wait);
-
- if (dev->cn_queue) {
- flush_workqueue(dev->cn_queue);
- destroy_workqueue(dev->cn_queue);
- }
+ flush_workqueue(dev->cn_queue);
+ destroy_workqueue(dev->cn_queue);
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
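
The rework above drops the lazily created singlethread queue in favour of
one allocated unconditionally when the device registers. What remains is
the stock ordered-workqueue lifecycle; a minimal self-contained sketch
(names hypothetical):

    #include <linux/workqueue.h>

    static void job_fn(struct work_struct *work)
    {
            /* runs with the queue's one-at-a-time, FIFO guarantee */
    }
    static DECLARE_WORK(job, job_fn);

    static int example_init(void)
    {
            struct workqueue_struct *wq;

            wq = alloc_ordered_workqueue("example", 0);
            if (!wq)
                    return -ENOMEM;
            queue_work(wq, &job);  /* false if this item is already queued */
            flush_workqueue(wq);   /* wait for everything queued so far */
            destroy_workqueue(wq);
            return 0;
    }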
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 1d48f40..e16c3fa 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -133,7 +133,8 @@
__cbq->data.skb == NULL)) {
__cbq->data.skb = skb;
- if (queue_cn_work(__cbq, &__cbq->work))
+ if (queue_work(dev->cbdev->cn_queue,
+ &__cbq->work))
err = 0;
else
err = -EINVAL;
@@ -148,13 +149,11 @@
d->callback = __cbq->data.callback;
d->free = __new_cbq;
- __new_cbq->pdev = __cbq->pdev;
-
INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
- if (queue_cn_work(__new_cbq,
- &__new_cbq->work))
+ if (queue_work(dev->cbdev->cn_queue,
+ &__new_cbq->work))
err = 0;
else {
kfree(__new_cbq);
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index af25e1f..e90db88 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -563,7 +563,7 @@
mdelay(10);
hw->ipac.isac.adf2 = 0x87;
hw->ipac.hscx[0].slot = 0x1f;
- hw->ipac.hscx[0].slot = 0x23;
+ hw->ipac.hscx[1].slot = 0x23;
break;
case INF_GAZEL_R753:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 40b914b..2e72227 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1427,8 +1427,8 @@
&bcs->hw.isar.reg->Flags))
bcs->hw.isar.dpath = 1;
else {
- printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n");
- debugl1(cs, "isar modeisar analog funktions only with DP1");
+ printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
+ debugl1(cs, "isar modeisar analog functions only with DP1");
return(1);
}
break;
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index b0554f8..ee4dae1 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -164,11 +164,9 @@
char tmp[80];
struct sk_buff *skb = arg;
- p = skb->data;
-
/* Channel Identification */
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_chanID, 0))) {
+ p = findie(skb->data, skb->len, WE0_chanID, 0);
+ if (p) {
if (p[1] != 1) {
l3_1tr6_error(pc, "setup wrong chanID len", skb);
return;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 3232206..7446d8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -392,6 +392,7 @@
if (dev) {
struct mISDN_devinfo di;
+ memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
@@ -672,6 +673,7 @@
if (dev) {
struct mISDN_devinfo di;
+ memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
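
The added memset() is an information-leak fix: the struct is later copied
to user space in full, so padding bytes and any members the kernel never
writes would otherwise expose stack contents. The pattern, sketched with
the ioctl's user pointer (here arg) as the destination:

    struct mISDN_devinfo di;

    memset(&di, 0, sizeof(di));  /* zero padding and unset members */
    di.id = dev->id;             /* then fill in only what is known */
    if (copy_to_user((void __user *)arg, &di, sizeof(di)))
            return -EFAULT;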
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 77c1fab..f24179d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2520,6 +2520,7 @@
config PCH_GBE
tristate "PCH Gigabit Ethernet"
depends on PCI
+ select MII
---help---
This is a gigabit ethernet driver for Topcliff PCH.
Topcliff PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 3134e53..8cb27cb 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -407,7 +407,7 @@
int writeflag)
{
int ret;
- long flags;
+ unsigned long flags;
long *vbr, save_berr;
local_irq_save(flags);
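
The type change is load-bearing rather than cosmetic: the irqflags
helpers are defined for unsigned long and enforce it with typecheck(), so
a plain long draws a build warning. Canonical shape:

    unsigned long flags;

    local_irq_save(flags);     /* mask local IRQs, stash previous state */
    /* critical section */
    local_irq_restore(flags);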
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index ef4115b..9ab5809 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -631,8 +631,6 @@
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
extern void atl1c_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 99ffcf6..09b099b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -66,6 +66,8 @@
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@
return err;
}
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
@@ -2351,7 +2353,7 @@
return err;
}
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index dbd27b8..5336310 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,8 @@
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
+static const struct ethtool_ops atl1_ethtool_ops;
+
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
@@ -553,7 +555,7 @@
 * 1. calculate a 32-bit CRC for the multicast address
* 2. reverse crc with MSB to LSB
*/
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
@@ -570,7 +572,7 @@
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
@@ -914,7 +916,7 @@
return 0;
}
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
@@ -3041,7 +3043,6 @@
atl1_pcie_patch(adapter);
/* assume we have no link for now */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
@@ -3658,7 +3659,7 @@
return 0;
}
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 9c0ddb2..68de8cb 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -56,16 +56,13 @@
struct atl1_hw;
/* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
-extern const struct ethtool_ops atl1_ethtool_ops;
-
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f979ea2..afb7f7d 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -41,6 +41,10 @@
#include "atlx.h"
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e7f305..36eca1c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1471,42 +1471,6 @@
return status;
}
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_port_type *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
- OPCODE_COMMON_READ_TRANSRECV_DATA);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
- req->port = cpu_to_le32(port);
- req->page_num = cpu_to_le32(TR_PAGE_A0);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
- *connector = resp->data.connector;
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c7f6cdf..8469ff0 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1022,8 +1022,6 @@
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 45b1f66..c36cd2f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -849,20 +849,16 @@
stats->rx_mcast_pkts++;
}
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+ u8 l4_cksm, ipv6, ipcksm;
l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- if (ip_version) {
- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
- }
- ipv6_chk = (ip_version && (tcpf || udpf));
+ ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+ /* Ignore ipcksm for ipv6 pkts */
+ return l4_cksm && (ipcksm || ipv6);
}
static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@
skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
- if (do_pkt_csum(rxcp, adapter->rx_csum))
- skb_checksum_none_assert(skb);
- else
+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
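
The rewrite turns the old multi-flag negative test into a single positive
question ("did hardware validate the L4 checksum?"), with the IP checksum
ignored for IPv6 since IPv6 headers carry none. What the two outcomes
tell the stack, in sketch form:

    if (hw_l4_csum_ok)      /* stand-in for csum_passed() above */
            skb->ip_summed = CHECKSUM_UNNECESSARY;  /* skip SW verify */
    else
            skb_checksum_none_assert(skb);  /* stack verifies in software */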
@@ -1674,7 +1670,7 @@
return (tcp_frame && !err) ? true : false;
}
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@
struct be_rx_obj *rxo;
int i;
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+ goto reschedule;
+ }
+
if (!adapter->stats_ioctl_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
@@ -1824,6 +1834,7 @@
if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
+reschedule:
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
@@ -2019,8 +2030,6 @@
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
- cancel_delayed_work_sync(&adapter->work);
-
be_async_mcc_disable(adapter);
netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@@ -2299,9 +2306,6 @@
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS",
- "H DIRECTORY *** "};
-
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
int hdr_size)
@@ -2559,7 +2563,6 @@
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
- netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
@@ -2715,6 +2718,8 @@
if (!adapter)
return;
+ cancel_delayed_work_sync(&adapter->work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
@@ -2868,8 +2873,10 @@
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
+ netif_carrier_off(netdev);
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9571ecf..863e73a 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-3"
-#define DRV_MODULE_RELDATE "2010/10/19"
+#define DRV_MODULE_VERSION "1.60.00-4"
+#define DRV_MODULE_RELDATE "2010/11/01"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -1288,15 +1288,11 @@
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index bc58375..459614d2 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include "bnx2x_init.h"
+static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -2187,7 +2188,7 @@
}
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 5bfe0ab..6b28739 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -117,13 +117,6 @@
void bnx2x_int_enable(struct bnx2x *bp);
/**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
-/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
*
@@ -192,17 +185,6 @@
int is_leading);
/**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
- struct bnx2x_client_ramrod_params *p);
-
-/**
* Set number of queues according to mode
*
* @param bp
@@ -250,34 +232,6 @@
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
- u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
/**
* Set MAC filtering configurations.
*
@@ -326,7 +280,6 @@
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
@@ -396,14 +349,6 @@
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
-/**
* NAPI callback
*
* @param napi
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23..4cfd4e9 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[57]; /* 0x1A8 */
+ u32 Reserved1[56]; /* 0x1A8 */
+ u32 default_cfg; /* 0x288 */
+ /* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
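
The new default_cfg word is consumed with the usual mask test; the 8073
init code later in this patch reads it back like this (condensed from the
bnx2x_link.c hunk below):

    u32 cfg = REG_RD(bp, params->shmem_base +
                     offsetof(struct shmem_region, dev_info.
                              port_hw_config[params->port].default_cfg));

    if (cfg & PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED)
            ;  /* enable CL37 BAM autoneg on the KR interface */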
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index e65de78..a306b0e 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -16,7 +16,9 @@
#define BNX2X_INIT_OPS_H
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
@@ -589,7 +591,7 @@
return rc;
}
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
@@ -635,7 +637,7 @@
}
}
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
@@ -688,8 +690,10 @@
}
}
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ u8 initop)
{
int i;
@@ -703,8 +707,8 @@
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -720,7 +724,7 @@
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@
* called during init common stage, ilt clients should be initialized
 * prior to calling this function
*/
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
int port = BP_PORT(bp);
@@ -814,8 +818,8 @@
}
/* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
@@ -836,8 +840,8 @@
****************************************************************************/
/* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
- dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 3e99bf9..5809196 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -181,6 +181,12 @@
(_bank + (_addr & 0xf)), \
_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@
return 0;
}
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
@@ -604,7 +610,7 @@
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- udelay(10);
+ msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2537,122 +2543,6 @@
}
}
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
- u32 led_idx, u32 value)
-{
- u32 reg_val;
-
- /* If port 0 then use EMAC0, else use EMAC1*/
- u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() port %x led_idx %d value %d\n",
- port, led_idx, value);
-
- switch (led_idx) {
- case 0: /* 10MB led */
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 10M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_10MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 1: /*100MB led */
- /*Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 100M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_100MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 2: /* 1000MB led */
- /* Read the current value of the LED register in the
- EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
- reset it. */
- reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 3: /* 2500MB led */
- /* Read the current value of the LED register in the
- EMAC block*/
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 4: /*10G led */
- if (port == 0) {
- REG_WR(bp, NIG_REG_LED_10G_P0,
- value);
- } else {
- REG_WR(bp, NIG_REG_LED_10G_P1,
- value);
- }
- break;
- case 5: /* TRAFFIC led */
- /* Find if the traffic control is via BMAC or EMAC */
- if (port == 0)
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
- else
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
- /* Override the traffic led in the EMAC:*/
- if (reg_val == 1) {
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base +
- EMAC_REG_EMAC_LED);
- /* Set the TRAFFIC_OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the TRAFFIC bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
- (reg_val & ~EMAC_LED_TRAFFIC);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- } else { /* Override the traffic led in the BMAC: */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
- value);
- }
- break;
- default:
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() unknown led index %d "
- "(should be 0-5)\n", led_idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
u8 bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
@@ -3635,13 +3525,19 @@
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
/* Enable CL37 BAM */
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, val | 1);
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -4099,9 +3995,9 @@
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -5412,7 +5308,7 @@
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
- bnx2x_wait_reset_complete(bp, phy);
+
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5541,6 +5437,7 @@
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5551,7 +5448,7 @@
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port, initialize = 1;
+ u8 port, initialize = 1;
u16 val;
u16 temp;
u32 actual_phy_selection;
@@ -5560,11 +5457,16 @@
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
msleep(1);
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(200); /* 100 is not enough */
-
+ bnx2x_wait_reset_complete(bp, phy);
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
/* BCM84823 requires that XGXS links up first @ 10G for normal
behavior */
temp = vars->line_speed;
@@ -5735,7 +5637,11 @@
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u8 port;
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
@@ -6819,13 +6725,6 @@
return 0;
}
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
- if (phy_idx < params->num_phys)
- return params->phy[phy_idx].supported;
- return 0;
-}
-
static void set_phy_vars(struct link_params *params)
{
struct bnx2x *bp = params->bp;
@@ -7045,7 +6944,7 @@
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u8 phy_index, port = params->port;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -7083,9 +6982,18 @@
params->phy[phy_index].link_reset(
¶ms->phy[phy_index],
params);
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
}
}
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
if (params->phy[INT_PHY].link_reset)
params->phy[INT_PHY].link_reset(
¶ms->phy[INT_PHY], params);
@@ -7116,6 +7024,7 @@
s8 port;
s8 port_of_path = 0;
+ bnx2x_ext_phy_hw_reset(bp, 0);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7138,7 +7047,8 @@
return -EINVAL;
}
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@@ -7249,7 +7159,7 @@
(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
- bnx2x_ext_phy_hw_reset(bp, 1);
+ bnx2x_ext_phy_hw_reset(bp, 0);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 58a4c71..171abf8 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -279,12 +279,6 @@
u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
-
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index ff99a2f..e9ad16f 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -403,7 +403,7 @@
/* used only at init
* locking is done by mcp
*/
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
{
u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
@@ -551,8 +552,9 @@
return opcode;
}
-void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
- u8 src_type, u8 dst_type)
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
@@ -567,7 +569,8 @@
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@
bnx2x_issue_dmae_with_comp(bp, &dmae);
}
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
{
int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
@@ -1267,7 +1270,7 @@
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@
}
/* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
u32 mask = (1 << cl_id);
@@ -2303,7 +2306,7 @@
bp->mac_filters.unmatched_unicast & ~mask;
}
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
struct tstorm_eth_function_common_config tcfg = {0};
u16 rss_flgs;
@@ -2460,7 +2463,7 @@
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@
hc_sm->time_to_expire = 0xFFFFFFFF;
}
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -6021,6 +6024,9 @@
/*
* Init service functions
*/
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags);
+
int bnx2x_func_start(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@
WAIT_RAMROD_COMMON);
}
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
@@ -6103,8 +6109,8 @@
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
@@ -6154,7 +6160,7 @@
return -EBUSY;
}
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@
*
 * @return 0 on success, -ENODEV if ramrod doesn't return.
*/
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@
ETH_CONNECTION_TYPE);
}
-int bnx2x_setup_fw_client(struct bnx2x *bp,
- struct bnx2x_client_init_params *params,
- u8 activate,
- struct client_init_ramrod_data *data,
- dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_init_params *params,
+ u8 activate,
+ struct client_init_ramrod_data *data,
+ dma_addr_t data_mapping)
{
u16 hc_usec;
int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@
return rc;
}
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_ramrod_params *p)
{
int rc;
@@ -7440,7 +7447,7 @@
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index beb3b7c..bdb68a6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -493,9 +493,9 @@
struct slave *slave;
int i;
- write_lock(&bond->lock);
+ write_lock_bh(&bond->lock);
bond->vlgrp = grp;
- write_unlock(&bond->lock);
+ write_unlock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
struct net_device *slave_dev = slave->dev;
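
The _bh variants are the substance of this fix: bond->lock is also taken
for reading on the transmit path, which may run in softirq context. A
writer that leaves bottom halves enabled can be interrupted on its own
CPU by a softirq reader that then spins forever on the lock the writer
holds. The general shape:

    static DEFINE_RWLOCK(lock);

    /* process-context writer */
    write_lock_bh(&lock);     /* _bh: keep softirqs off this CPU */
    /* update data that softirq readers also touch */
    write_unlock_bh(&lock);

    /* softirq-context reader */
    read_lock(&lock);
    /* ... */
    read_unlock(&lock);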
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a..09ed3f4 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@
Putting the next command and length in the start of the frame can
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+ tristate "CAIF shared memory protocol driver"
+ depends on CAIF && U5500_MBOX
+ default n
+ ---help---
+ The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d61..b38d987 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -8,3 +8,7 @@
# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 0000000..1cd90da
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES 1
+
+enum {
+ MBX_ACC0,
+ MBX_ACC1,
+ MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint, 0440);
+MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
+
+module_param(shm_start, uint, 0440);
+MODULE_PARM_DESC(shm_start, "Start address of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+ /* Always block until msg is written successfully */
+ mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+ return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+ void *pshm_drv)
+{
+ /*
+ * For UX5500, we have only 1 SHM instance which uses MBX0
+ * for communication with the peer modem
+ */
+ pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+ if (!pshm_dev->hmbx)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+ int i, result;
+
+ /* Loop is currently overkill, there is only one instance */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+ shmdev_lyr[i].shm_base_addr = shm_start;
+ shmdev_lyr[i].shm_total_sz = shm_size;
+
+ if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+ || (shmdev_lyr[i].shm_total_sz <= 0)) {
+ pr_warn("ERROR,"
+ "Shared memory Address and/or Size incorrect"
+ ", Bailing out ...\n");
+ result = -EINVAL;
+ goto clean;
+ }
+
+ pr_info("SHM AREA (instance %d) STARTS"
+ " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+ shmdev_lyr[i].shm_id = i;
+ shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+ shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+ /*
+ * Finally, CAIF core module is called with details in place:
+ * 1. SHM base address
+ * 2. SHM size
+ * 3. MBX handle
+ */
+ result = caif_shmcore_probe(&shmdev_lyr[i]);
+ if (result) {
+ pr_warn("ERROR[%d],"
+ "Could not probe SHM core (instance %d)"
+ " Bailing out ...\n", result, i);
+ goto clean;
+ }
+ }
+
+ return 0;
+
+clean:
+ /*
+ * For now, we assume that even if one instance of SHM fails, we bail
+ * out of the driver support completely. For this, we need to release
+ * any memory allocated and unregister any instance of SHM net device.
+ */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ if (shmdev_lyr[i].pshm_netdev)
+ unregister_netdev(shmdev_lyr[i].pshm_netdev);
+ }
+ return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+ kfree((void *)shmdev_lyr[i].shm_base_addr);
+ }
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
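
Both parameters are mandatory in practice (the probe loop above bails out
on a zero base address or size), so a hypothetical load line supplies
them explicitly, e.g. "modprobe caif_shm shm_start=<phys-addr>
shm_size=<bytes>". With 0440 permissions they appear read-only under
/sys/module/caif_shm/parameters/ once loaded.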
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 0000000..19f9c06
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ * Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF 6
+#define NR_RX_BUF 6
+#define TX_BUF_SZ 0x2000
+#define RX_BUF_SZ 0x2000
+
+#define CAIF_NEEDED_HEADROOM 32
+
+#define CAIF_FLOW_ON 1
+#define CAIF_FLOW_OFF 0
+
+#define LOW_WATERMARK 3
+#define HIGH_WATERMARK 4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF 10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
+ sizeof(struct shm_pck_desc))
+
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS (((SHM_CAIF_DESC_SIZE + 31) / 32) * 32)
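+/* e.g. with 8-byte descriptors: (10 + 1) * 8 = 88 rounds up to 96 */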
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN 1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN 4
+
+#define CAIF_MAX_MTU 4096
+
+#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK (0x0F << 0)
+#define SHM_EMPTY_MASK (0x0F << 4)
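+/*
+ * The mailbox word carries two 4-bit buffer indices, biased by one so
+ * that a zero nibble means "no index". For example,
+ * SHM_SET_FULL(2) | SHM_SET_EMPTY(1) == 0x23, which decodes back as
+ * SHM_GET_FULL(0x23) == 2 and SHM_GET_EMPTY(0x23) == 1.
+ */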
+
+struct shm_pck_desc {
+ /*
+ * Offset from start of shared memory area to start of
+ * shared memory CAIF frame.
+ */
+ u32 frm_ofs;
+ u32 frm_len;
+};
+
+struct buf_list {
+ unsigned char *desc_vptr;
+ u32 phy_addr;
+ u32 index;
+ u32 len;
+ u32 frames;
+ u32 frm_ofs;
+ struct list_head list;
+};
+
+struct shm_caif_frm {
+ /* Number of bytes of padding before the CAIF frame. */
+ u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+ /* caif_dev_common must always be first in the structure*/
+ struct caif_dev_common cfdev;
+
+ u32 shm_tx_addr;
+ u32 shm_rx_addr;
+ u32 shm_base_addr;
+ u32 tx_empty_available;
+ spinlock_t lock;
+
+ struct list_head tx_empty_list;
+ struct list_head tx_pend_list;
+ struct list_head tx_full_list;
+ struct list_head rx_empty_list;
+ struct list_head rx_pend_list;
+ struct list_head rx_full_list;
+
+ struct workqueue_struct *pshm_tx_workqueue;
+ struct workqueue_struct *pshm_rx_workqueue;
+
+ struct work_struct shm_tx_work;
+ struct work_struct shm_rx_work;
+
+ struct sk_buff_head sk_qhead;
+ struct shmdev_layer *pshm_dev;
+};
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+ netif_wake_queue(shm_netdev);
+ return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+ netif_stop_queue(shm_netdev);
+ return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv;
+ struct list_head *pos;
+ u32 avail_emptybuff = 0;
+ unsigned long flags = 0;
+
+ pshm_drv = (struct shmdrv_layer *)priv;
+
+ /* Check for received buffers. */
+ if (mbx_msg & SHM_FULL_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->rx_empty_list)) {
+
+ /* Release spin lock. */
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("No empty Rx buffers to fill: "
+ "mbx_msg:%x\n", mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_FULL(mbx_msg)) {
+
+ /* We print even in IRQ context... */
+ pr_warn(
+ "phyif_shm_mbx_msg_cb: RX full out of sync:"
+ " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+ idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule RX work queue. */
+ if (!work_pending(&pshm_drv->shm_rx_work))
+ queue_work(pshm_drv->pshm_rx_workqueue,
+ &pshm_drv->shm_rx_work);
+ }
+
+ /* Check for emptied buffers. */
+ if (mbx_msg & SHM_EMPTY_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->tx_full_list)) {
+
+ /* We print even in IRQ context... */
+ pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("TX empty "
+ "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+ list_del_init(&pbuf->list);
+
+ /* Reset buffer parameters. */
+ pbuf->frames = 0;
+ pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ /* Check whether we have to wake up the transmitter. */
+ if ((avail_emptybuff > HIGH_WATERMARK) &&
+ (!pshm_drv->tx_empty_available)) {
+ pshm_drv->tx_empty_available = 1;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_ON);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule the work queue. if required */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue,
+ &pshm_drv->shm_tx_work);
+ } else
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ }
+
+ return 0;
+
+err_sync:
+ return -EIO;
+}
+
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+ struct shmdrv_layer *pshm_drv;
+ struct buf_list *pbuf;
+ unsigned long flags = 0;
+ struct sk_buff *skb;
+ char *p;
+ int ret;
+
+ pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+ while (1) {
+
+ struct shm_pck_desc *pck_desc;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for received buffers. */
+ if (list_empty(&pshm_drv->rx_full_list)) {
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ break;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+ list);
+ list_del_init(&pbuf->list);
+
+ /* Retrieve pointer to start of the packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+ /*
+ * Check whether descriptor contains a CAIF shared memory
+ * frame.
+ */
+ while (pck_desc->frm_ofs) {
+ unsigned int frm_buf_ofs;
+ unsigned int frm_pck_ofs;
+ unsigned int frm_pck_len;
+ /*
+ * Check whether offset is within buffer limits
+ * (lower).
+ */
+ if (pck_desc->frm_ofs <
+ (pbuf->phy_addr - pshm_drv->shm_base_addr))
+ break;
+ /*
+ * Check whether offset is within buffer limits
+ * (higher).
+ */
+ if (pck_desc->frm_ofs >
+ ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+ pbuf->len))
+ break;
+
+ /* Calculate offset from start of buffer. */
+ frm_buf_ofs =
+ pck_desc->frm_ofs - (pbuf->phy_addr -
+ pshm_drv->shm_base_addr);
+
+ /*
+ * Calculate offset and length of CAIF packet while
+ * taking care of the shared memory header.
+ */
+ frm_pck_ofs =
+ frm_buf_ofs + SHM_HDR_LEN +
+ (*(pbuf->desc_vptr + frm_buf_ofs));
+ frm_pck_len =
+ (pck_desc->frm_len - SHM_HDR_LEN -
+ (*(pbuf->desc_vptr + frm_buf_ofs)));
+
+ /* Check whether CAIF packet is within buffer limits */
+ if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+ break;
+
+ /* Get a suitable CAIF packet and copy in data. */
+ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+ frm_pck_len + 1);
+ BUG_ON(skb == NULL);
+
+ p = skb_put(skb, frm_pck_len);
+ memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+
+ if (!ret) {
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_bytes += pck_desc->frm_len;
+ } else
+ ++pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_dropped;
+ /* Move to next packet descriptor. */
+ pck_desc++;
+ }
+
+ list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ }
+
+	/* Schedule the work queue, if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+ u32 mbox_msg;
+ unsigned int frmlen, avail_emptybuff, append = 0;
+ unsigned long flags = 0;
+ struct buf_list *pbuf = NULL;
+ struct shmdrv_layer *pshm_drv;
+ struct shm_caif_frm *frm;
+ struct sk_buff *skb;
+ struct shm_pck_desc *pck_desc;
+ struct list_head *pos;
+
+ pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+ do {
+ /* Initialize mailbox message. */
+ mbox_msg = 0x00;
+ avail_emptybuff = 0;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for pending receive buffers. */
+ if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+ pbuf = list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+ /*
+ * Value index is never changed,
+ * so read access should be safe.
+ */
+ mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+ }
+
+ skb = skb_peek(&pshm_drv->sk_qhead);
+
+ if (skb == NULL)
+ goto send_msg;
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ if ((avail_emptybuff < LOW_WATERMARK) &&
+ pshm_drv->tx_empty_available) {
+ /* Update blocking condition. */
+ pshm_drv->tx_empty_available = 0;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_OFF);
+ }
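+		/*
+		 * Flow control sketch (restating the logic above, not new
+		 * behaviour): the empty-buffer count acts as a hysteresis.
+		 * Dropping below LOW_WATERMARK turns flow off here, and the
+		 * mailbox callback turns it back on only once the count has
+		 * climbed past HIGH_WATERMARK, so the two thresholds keep
+		 * the CAIF stack from being toggled on every buffer.
+		 */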
+ /*
+		 * We simply return to the caller if we do not have space
+		 * either in the Tx pending list or the Tx empty list. In this
+		 * case, we hold the received skb in the skb queue, waiting for
+		 * it to be transmitted once Tx buffers become available.
+ */
+ if (list_empty(&pshm_drv->tx_empty_list))
+ goto send_msg;
+
+ /* Get the first free Tx buffer. */
+ pbuf = list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ do {
+ if (append) {
+ skb = skb_peek(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
+ }
+
+ frm = (struct shm_caif_frm *)
+ (pbuf->desc_vptr + pbuf->frm_ofs);
+
+ frm->hdr_ofs = 0;
+ frmlen = 0;
+ frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+ /* Add tail padding if needed. */
+ if (frmlen % SHM_FRM_PAD_LEN)
+ frmlen += SHM_FRM_PAD_LEN -
+ (frmlen % SHM_FRM_PAD_LEN);
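+			/*
+			 * Padding example (SHM_FRM_PAD_LEN assumed to be 4
+			 * here purely for illustration): frmlen == 45 gives
+			 * 45 % 4 == 1, so 3 bytes of tail padding round the
+			 * frame up to 48.
+			 */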
+
+ /*
+ * Verify that packet, header and additional padding
+ * can fit within the buffer frame area.
+ */
+ if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+ break;
+
+ if (!append) {
+ list_del_init(&pbuf->list);
+ append = 1;
+ }
+
+ skb = skb_dequeue(&pshm_drv->sk_qhead);
+ /* Copy in CAIF frame. */
+ skb_copy_bits(skb, 0, pbuf->desc_vptr +
+ pbuf->frm_ofs + SHM_HDR_LEN +
+ frm->hdr_ofs, skb->len);
+
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+ frmlen;
+ dev_kfree_skb(skb);
+
+ /* Fill in the shared memory packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+ /* Forward to current frame. */
+ pck_desc += pbuf->frames;
+ pck_desc->frm_ofs = (pbuf->phy_addr -
+ pshm_drv->shm_base_addr) +
+ pbuf->frm_ofs;
+ pck_desc->frm_len = frmlen;
+ /* Terminate packet descriptor area. */
+ pck_desc++;
+ pck_desc->frm_ofs = 0;
+ /* Update buffer parameters. */
+ pbuf->frames++;
+ pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+ } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+ /* Assign buffer as full. */
+ list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+ append = 0;
+ mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ if (mbox_msg)
+ pshm_drv->pshm_dev->pshmdev_mbxsend
+ (pshm_drv->pshm_dev->shm_id, mbox_msg);
+ } while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ unsigned long flags = 0;
+
+ pshm_drv = netdev_priv(shm_netdev);
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+	/* Schedule the Tx work queue for deferred processing of skbs. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = shm_netdev_open,
+ .ndo_stop = shm_netdev_close,
+ .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ pshm_netdev->netdev_ops = &netdev_ops;
+
+ pshm_netdev->mtu = CAIF_MAX_MTU;
+ pshm_netdev->type = ARPHRD_CAIF;
+ pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ pshm_netdev->tx_queue_len = 0;
+ pshm_netdev->destructor = free_netdev;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ /* Initialize structures in a clean state. */
+ memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+ pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+ int result, j;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+ "cfshm%d", shm_netdev_setup);
+ if (!pshm_dev->pshm_netdev)
+ return -ENOMEM;
+
+ pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+ pshm_drv->pshm_dev = pshm_dev;
+
+ /*
+ * Initialization starts with the verification of the
+ * availability of MBX driver by calling its setup function.
+ * MBX driver must be available by this time for proper
+ * functioning of SHM driver.
+ */
+ if ((pshm_dev->pshmdev_mbxsetup
+ (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+		pr_warn("Could not configure SHM mailbox, bailing out\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENODEV;
+ }
+
+ skb_queue_head_init(&pshm_drv->sk_qhead);
+
+	pr_info("SHM device[%d] probed by driver, new SHM driver"
+			" instance at pshm_drv = 0x%p\n",
+			pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+ if (pshm_dev->shm_total_sz <
+ (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+		pr_warn("ERROR, amount of available"
+				" phys. SHM cannot accommodate current SHM "
+				"driver configuration, bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+
+ pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+ pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+ if (pshm_dev->shm_loopback)
+ pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+ else
+ pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+ (NR_TX_BUF * TX_BUF_SZ);
+
+ INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+ INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+ INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+ INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+ pshm_drv->pshm_tx_workqueue =
+ create_singlethread_workqueue("shm_tx_work");
+ pshm_drv->pshm_rx_workqueue =
+ create_singlethread_workqueue("shm_rx_work");
+
+ for (j = 0; j < NR_TX_BUF; j++) {
+ struct buf_list *tx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (tx_buf == NULL) {
+			pr_warn("ERROR, could not"
+					" allocate dynamic mem. for tx_buf,"
+					" bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ tx_buf->index = j;
+ tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+ tx_buf->len = TX_BUF_SZ;
+ tx_buf->frames = 0;
+ tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ if (pshm_dev->shm_loopback)
+ tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ else
+ tx_buf->desc_vptr =
+ ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+ list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+ }
+
+ for (j = 0; j < NR_RX_BUF; j++) {
+ struct buf_list *rx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (rx_buf == NULL) {
+			pr_warn("ERROR, could not"
+					" allocate dynamic mem. for rx_buf,"
+					" bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ rx_buf->index = j;
+ rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+ rx_buf->len = RX_BUF_SZ;
+
+ if (pshm_dev->shm_loopback)
+ rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ else
+ rx_buf->desc_vptr =
+ ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+ list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+ }
+
+ pshm_drv->tx_empty_available = 1;
+ result = register_netdev(pshm_dev->pshm_netdev);
+ if (result)
+		pr_warn("ERROR[%d], SHM could not register"
+			" with NW framework, bailing out ...\n", result);
+
+ return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ while (!(list_empty(&pshm_drv->tx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_pend_list.next,
+ struct buf_list, list);
+
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_full_list))) {
+ pbuf =
+			list_entry(pshm_drv->rx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_pend_list))) {
+ pbuf =
+			list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ /* Destroy work queues. */
+ destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+ destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+ unregister_netdev(pshm_netdev);
+}
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533..8b4cea5 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF SPI driver");
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
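+/*
+ * Worked example (values purely illustrative): PAD_POW2(5, 4) yields
+ * 4 - (5 & 3) = 3 padding bytes, while PAD_POW2(8, 4) yields 0 because
+ * 8 is already aligned; "pow" must therefore be a power of two.
+ */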
+
static int spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
+ */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
@@ -240,15 +246,13 @@
static const struct file_operations dbgfs_state_fops = {
.open = dbgfs_open,
.read = dbgfs_state,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static const struct file_operations dbgfs_frame_fops = {
.open = dbgfs_open,
.read = dbgfs_frame,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@
u8 *dst = buf;
caif_assert(buf);
+ if (cfspi->slave && !cfspi->slave_talked)
+ cfspi->slave_talked = true;
+
do {
struct sk_buff *skb;
struct caif_payload_info *info;
@@ -357,8 +364,8 @@
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align) {
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1) {
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
*dst = (u8)(spad - 1);
dst += spad;
}
@@ -373,7 +380,7 @@
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
dst += epad;
dev_kfree_skb(skb);
@@ -417,14 +424,14 @@
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align)
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1)
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
/*
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@
} else {
/* Put back packet. */
skb_queue_head(&cfspi->qhead, skb);
+ break;
}
} while (pkts <= CAIF_MAX_SPI_PKTS);
@@ -453,6 +461,15 @@
{
struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+ /*
+ * The slave device is the master on the link. Interrupts before the
+ * slave has transmitted are considered spurious.
+ */
+ if (cfspi->slave && !cfspi->slave_talked) {
+ printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+ return;
+ }
+
if (!in_interrupt())
spin_lock(&cfspi->lock);
if (assert) {
@@ -465,7 +482,8 @@
spin_unlock(&cfspi->lock);
/* Wake up the xfer thread. */
- wake_up_interruptible(&cfspi->wait);
+ if (assert)
+ wake_up_interruptible(&cfspi->wait);
}
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@
* Compute head offset i.e. number of bytes added to
* get the start of the payload aligned.
*/
- if (spi_down_head_align) {
+ if (spi_down_head_align > 1) {
spad = 1 + *src;
src += spad;
}
@@ -564,7 +582,7 @@
* Compute tail offset i.e. number of bytes added to
* get the complete CAIF frame aligned.
*/
- epad = (pkt_len + spad) & spi_down_tail_align;
+ epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
src += epad;
} while ((src - buf) < len);
@@ -625,11 +643,20 @@
cfspi->ndev = ndev;
cfspi->pdev = pdev;
- /* Set flow info */
+ /* Set flow info. */
cfspi->flow_off_sent = 0;
cfspi->qd_low_mark = LOW_WATER_MARK;
cfspi->qd_high_mark = HIGH_WATER_MARK;
+ /* Set slave info. */
+ if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+ cfspi->slave = true;
+ cfspi->slave_talked = false;
+ } else {
+ cfspi->slave = false;
+ cfspi->slave_talked = false;
+ }
+
/* Assign the SPI device. */
cfspi->dev = dev;
/* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbf..1b9943a 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@
#endif
int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
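+/*
+ * Reading these defaults with the PAD_POW2() helper from caif_spi.c (a
+ * sketch, not extra configuration): spi_up_head_align == 2 pads the
+ * uplink payload start to a 2-byte boundary, so a 5-byte header gives
+ * spad = 1 + PAD_POW2(5 + 1, 2) = 1, i.e. only the mandatory offset
+ * byte.
+ */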
#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9d9e453..080574b 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -82,6 +82,14 @@
---help---
Say Y here if you want to support for Freescale FlexCAN.
+config PCH_CAN
+ tristate "PCH CAN"
+ depends on CAN_DEV && PCI
+ ---help---
+	  This driver is for the PCH CAN controller of Topcliff, an IOH
+	  (I/O Hub) for x86 embedded processors. It provides access to
+	  the CAN bus.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0057537..90af15a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,5 +17,6 @@
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_PCH_CAN) += pch_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2d8bd86..7ef83d0 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1,8 +1,8 @@
/*
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
- * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
#include <mach/board.h>
-#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12
/*
@@ -172,6 +171,7 @@
};
static struct can_bittiming_const at91_bittiming_const = {
+ .name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
@@ -199,13 +199,13 @@
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
- return readl(priv->reg_base + reg);
+ return __raw_readl(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
- writel(value, priv->reg_base + reg);
+ __raw_writel(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+ /* reset acceptance mask and id register */
+ for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+		at91_write(priv, AT91_MAM(i), 0x0);
+ at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
+ }
+
/* The last 4 mailboxes are used for transmitting. */
for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_br;
- reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
- ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+ reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
+ ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
((bt->phase_seg2 - 1) << 0);
- dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+ netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
return 0;
}
+static int at91_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+ bec->rxerr = reg_ecr & 0xff;
+ bec->txerr = reg_ecr >> 16;
+
+ return 0;
+}
+
static void at91_chip_start(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@
reg_mr = at91_read(priv, AT91_MR);
at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+ at91_set_bittiming(dev);
at91_setup_mailboxes(dev);
at91_transceiver_switch(priv, 1);
@@ -350,8 +369,7 @@
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
- dev_err(dev->dev.parent,
- "BUG! TX buffer full when queue awake!\n");
+ netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -435,7 +453,7 @@
struct sk_buff *skb;
struct can_frame *cf;
- dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+ netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
@@ -480,6 +498,9 @@
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+ /* allow RX of extended frames */
+ at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
@@ -565,8 +586,8 @@
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
reg_sr & AT91_MB_RX_LOW_MASK)
- dev_info(dev->dev.parent,
- "order of incoming frames cannot be guaranteed\n");
+ netdev_info(dev,
+ "order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@
/* CRC error */
if (reg_sr & AT91_IRQ_CERR) {
- dev_dbg(dev->dev.parent, "CERR irq\n");
+ netdev_dbg(dev, "CERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@
/* Stuffing Error */
if (reg_sr & AT91_IRQ_SERR) {
- dev_dbg(dev->dev.parent, "SERR irq\n");
+ netdev_dbg(dev, "SERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@
/* Acknowledgement Error */
if (reg_sr & AT91_IRQ_AERR) {
- dev_dbg(dev->dev.parent, "AERR irq\n");
+ netdev_dbg(dev, "AERR irq\n");
dev->stats.tx_errors++;
cf->can_id |= CAN_ERR_ACK;
}
/* Form error */
if (reg_sr & AT91_IRQ_FERR) {
- dev_dbg(dev->dev.parent, "FERR irq\n");
+ netdev_dbg(dev, "FERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@
/* Bit Error */
if (reg_sr & AT91_IRQ_BERR) {
- dev_dbg(dev->dev.parent, "BERR irq\n");
+ netdev_dbg(dev, "BERR irq\n");
dev->stats.tx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@
struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
- u32 reg_idr, reg_ier, reg_ecr;
- u8 tec, rec;
+ u32 reg_idr = 0, reg_ier = 0;
+ struct can_berr_counter bec;
- reg_ecr = at91_read(priv, AT91_ECR);
- rec = reg_ecr & 0xff;
- tec = reg_ecr >> 16;
+ at91_get_berr_counter(dev, &bec);
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
@@ -787,11 +806,11 @@
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
@@ -804,7 +823,7 @@
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_RESTARTED;
- dev_dbg(dev->dev.parent, "restarted\n");
+ netdev_dbg(dev, "restarted\n");
priv->can.can_stats.restarts++;
netif_carrier_on(dev);
@@ -825,7 +844,7 @@
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
*/
- dev_dbg(dev->dev.parent, "Error Active\n");
+ netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
case CAN_STATE_ERROR_WARNING: /* fallthrough */
@@ -843,7 +862,7 @@
cf->can_id |= CAN_ERR_BUSOFF;
- dev_dbg(dev->dev.parent, "bus-off\n");
+ netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can.can_stats.bus_off++;
@@ -881,7 +900,7 @@
else if (likely(reg_sr & AT91_IRQ_ERRA))
new_state = CAN_STATE_ERROR_ACTIVE;
else {
- dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+ netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@@ -1018,7 +1037,7 @@
.ndo_start_xmit = at91_start_xmit,
};
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct at91_priv *priv;
@@ -1067,8 +1086,8 @@
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
priv->can.bittiming_const = &at91_bittiming_const;
- priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
@@ -1092,7 +1111,7 @@
return 0;
exit_free:
- free_netdev(dev);
+ free_candev(dev);
exit_iounmap:
iounmap(addr);
exit_release:
@@ -1113,8 +1132,6 @@
platform_set_drvdata(pdev, NULL);
- free_netdev(dev);
-
iounmap(priv->reg_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
@@ -1129,21 +1148,19 @@
.probe = at91_can_probe,
.remove = __devexit_p(at91_can_remove),
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
static int __init at91_can_module_init(void)
{
- printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
return platform_driver_register(&at91_can_driver);
}
static void __exit at91_can_module_exit(void)
{
platform_driver_unregister(&at91_can_driver);
- printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
}
module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ef443a0..d499056 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -992,7 +992,6 @@
unregister_flexcandev(dev);
platform_set_drvdata(pdev, NULL);
- free_candev(dev);
iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 6aadc3e..7ab534a 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -169,6 +169,7 @@
# define RXBSIDH_SHIFT 3
#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
# define RXBSIDL_IDE 0x08
+# define RXBSIDL_SRR 0x10
# define RXBSIDL_EID 3
# define RXBSIDL_SHIFT 5
#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@
frame->can_id =
(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+ if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+ frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644
index 0000000..6727182
--- /dev/null
+++ b/drivers/net/can/pch_can.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_MSG_OBJ 32
+#define MSG_OBJ_RX 0 /* The receive message object flag. */
+#define MSG_OBJ_TX 1 /* The transmit message object flag. */
+
+#define ENABLE 1 /* The enable flag */
+#define DISABLE 0 /* The disable flag */
+#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
+#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
+#define CAN_CTRL_IE_SIE_EIE 0x000e
+#define CAN_CTRL_CCE 0x0040
+#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
+#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
+#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
+#define CAN_CMASK_RX_TX_SET 0x00f3
+#define CAN_CMASK_RX_TX_GET 0x0073
+#define CAN_CMASK_ALL 0xff
+#define CAN_CMASK_RDWR 0x80
+#define CAN_CMASK_ARB 0x20
+#define CAN_CMASK_CTRL 0x10
+#define CAN_CMASK_MASK 0x40
+#define CAN_CMASK_NEWDAT 0x04
+#define CAN_CMASK_CLRINTPND 0x08
+
+#define CAN_IF_MCONT_NEWDAT 0x8000
+#define CAN_IF_MCONT_INTPND 0x2000
+#define CAN_IF_MCONT_UMASK 0x1000
+#define CAN_IF_MCONT_TXIE 0x0800
+#define CAN_IF_MCONT_RXIE 0x0400
+#define CAN_IF_MCONT_RMTEN 0x0200
+#define CAN_IF_MCONT_TXRQXT 0x0100
+#define CAN_IF_MCONT_EOB 0x0080
+#define CAN_IF_MCONT_DLC 0x000f
+#define CAN_IF_MCONT_MSGLOST 0x4000
+#define CAN_MASK2_MDIR_MXTD 0xc000
+#define CAN_ID2_DIR 0x2000
+#define CAN_ID_MSGVAL 0x8000
+
+#define CAN_STATUS_INT 0x8000
+#define CAN_IF_CREQ_BUSY 0x8000
+#define CAN_ID2_XTD 0x4000
+
+#define CAN_REC 0x00007f00
+#define CAN_TEC 0x000000ff
+
+#define PCH_RX_OK 0x00000010
+#define PCH_TX_OK 0x00000008
+#define PCH_BUS_OFF 0x00000080
+#define PCH_EWARN 0x00000040
+#define PCH_EPASSIV 0x00000020
+#define PCH_LEC0 0x00000001
+#define PCH_LEC1 0x00000002
+#define PCH_LEC2 0x00000004
+#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
+#define PCH_STUF_ERR PCH_LEC0
+#define PCH_FORM_ERR PCH_LEC1
+#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
+#define PCH_BIT1_ERR PCH_LEC2
+#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
+#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
+
+/* bit position of certain controller bits. */
+#define BIT_BITT_BRP 0
+#define BIT_BITT_SJW 6
+#define BIT_BITT_TSEG1 8
+#define BIT_BITT_TSEG2 12
+#define BIT_IF1_MCONT_RXIE 10
+#define BIT_IF2_MCONT_TXIE 11
+#define BIT_BRPE_BRPE 6
+#define BIT_ES_TXERRCNT 0
+#define BIT_ES_RXERRCNT 8
+#define MSK_BITT_BRP 0x3f
+#define MSK_BITT_SJW 0xc0
+#define MSK_BITT_TSEG1 0xf00
+#define MSK_BITT_TSEG2 0x7000
+#define MSK_BRPE_BRPE 0x3c0
+#define MSK_BRPE_GET 0x0f
+#define MSK_CTRL_IE_SIE_EIE 0x07
+#define MSK_MCONT_TXIE 0x08
+#define MSK_MCONT_RXIE 0x10
+#define PCH_CAN_NO_TX_BUFF 1
+#define COUNTER_LIMIT 10
+
+#define PCH_CAN_CLK 50000000 /* 50MHz */
+
+/*
+ * Define the number of message objects.
+ * PCH CAN communications are done via Message RAM.
+ * The Message RAM consists of 32 message objects.
+ */
+#define PCH_RX_OBJ_NUM	    26  /* Objects 1..PCH_RX_OBJ_NUM are Rx */
+#define PCH_TX_OBJ_NUM	    6   /* The remaining objects are Tx */
+#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
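+/*
+ * Resulting message object map (a sketch of what
+ * pch_can_config_rx_tx_buffers() sets up below, not a separate table):
+ *
+ *	objects  1..26	Rx, FIFO-linked (EOB set only on object 26)
+ *	objects 27..32	Tx (EOB set on each)
+ */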
+
+#define PCH_FIFO_THRESH 16
+
+enum pch_can_mode {
+ PCH_CAN_ENABLE,
+ PCH_CAN_DISABLE,
+ PCH_CAN_ALL,
+ PCH_CAN_NONE,
+ PCH_CAN_STOP,
+ PCH_CAN_RUN
+};
+
+struct pch_can_regs {
+ u32 cont;
+ u32 stat;
+ u32 errc;
+ u32 bitt;
+ u32 intr;
+ u32 opt;
+ u32 brpe;
+ u32 reserve1;
+ u32 if1_creq;
+ u32 if1_cmask;
+ u32 if1_mask1;
+ u32 if1_mask2;
+ u32 if1_id1;
+ u32 if1_id2;
+ u32 if1_mcont;
+ u32 if1_dataa1;
+ u32 if1_dataa2;
+ u32 if1_datab1;
+ u32 if1_datab2;
+ u32 reserve2;
+ u32 reserve3[12];
+ u32 if2_creq;
+ u32 if2_cmask;
+ u32 if2_mask1;
+ u32 if2_mask2;
+ u32 if2_id1;
+ u32 if2_id2;
+ u32 if2_mcont;
+ u32 if2_dataa1;
+ u32 if2_dataa2;
+ u32 if2_datab1;
+ u32 if2_datab2;
+ u32 reserve4;
+ u32 reserve5[20];
+ u32 treq1;
+ u32 treq2;
+ u32 reserve6[2];
+ u32 reserve7[56];
+ u32 reserve8[3];
+ u32 srst;
+};
+
+struct pch_can_priv {
+ struct can_priv can;
+ unsigned int can_num;
+ struct pci_dev *dev;
+ unsigned int tx_enable[MAX_MSG_OBJ];
+ unsigned int rx_enable[MAX_MSG_OBJ];
+ unsigned int rx_link[MAX_MSG_OBJ];
+ unsigned int int_enables;
+ unsigned int int_stat;
+ struct net_device *ndev;
+	spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock */
+ unsigned int msg_obj[MAX_MSG_OBJ];
+ struct pch_can_regs __iomem *regs;
+ struct napi_struct napi;
+	unsigned int tx_obj;	/* Index of the next Tx message object */
+ unsigned int use_msi;
+};
+
+static struct can_bittiming_const pch_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6bit + extended 4bit */
+ .brp_inc = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
+ {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
+
+static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) | mask, addr);
+}
+
+static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) & ~mask, addr);
+}
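+/*
+ * Usage sketch for the two helpers above (illustrative only): both do a
+ * read-modify-write of a memory-mapped register, e.g.
+ *
+ *	pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ *	pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ *
+ * sets and then clears the INIT bit while leaving the other bits intact.
+ */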
+
+static void pch_can_set_run_mode(struct pch_can_priv *priv,
+ enum pch_can_mode mode)
+{
+ switch (mode) {
+ case PCH_CAN_RUN:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ case PCH_CAN_STOP:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+ break;
+ }
+}
+
+static void pch_can_set_optmode(struct pch_can_priv *priv)
+{
+ u32 reg_val = ioread32(&priv->regs->opt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_val |= CAN_OPT_SILENT;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ reg_val |= CAN_OPT_LBACK;
+
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ iowrite32(reg_val, &priv->regs->opt);
+}
+
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+ /* Clearing the IE, SIE and EIE bits of Can control register. */
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+
+ /* Appropriately setting them. */
+ pch_can_bit_set(&priv->regs->cont,
+ ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
+/* This function retrieves the interrupts enabled for the CAN device. */
+static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
+{
+ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+ *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+}
+
+static void pch_can_set_int_enables(struct pch_can_priv *priv,
+ enum pch_can_mode interrupt_no)
+{
+ switch (interrupt_no) {
+ case PCH_CAN_ENABLE:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_DISABLE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_ALL:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ case PCH_CAN_NONE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+ break;
+ }
+}
+
+static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+{
+ u32 counter = COUNTER_LIMIT;
+ u32 ifx_creq;
+
+ iowrite32(num, creq_addr);
+ while (counter) {
+ ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+ if (!ifx_creq)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+		pr_err("%s: IF busy flag did not clear.\n", __func__);
+}
+
+static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the receive buffer data from RAM to Interface1 registers */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and RxIE bits */
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and RxIE bits */
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+	/* Traversing to obtain the objects configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_rx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+	/* Traversing to obtain the objects configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the Msg buffer from Message RAM to Interface2 registers. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+	/*
+	 * Setting the IF2CMASK register for accessing the
+	 * MsgVal and TxIE bits.
+	 */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and TxIE bits */
+ pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and TxIE bits. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+	/* Traversing to obtain the objects configured as transmitters. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+	/* Traversing to obtain the objects configured as transmitters. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if1_mcont)) &
+ CAN_IF_MCONT_RXIE))
+ *enable = ENABLE;
+ else
+ *enable = DISABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if2_mcont)) &
+ CAN_IF_MCONT_TXIE)) {
+ *enable = ENABLE;
+ } else {
+ *enable = DISABLE;
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static int pch_can_int_pending(struct pch_can_priv *priv)
+{
+ return ioread32(&priv->regs->intr) & 0xffff;
+}
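+/*
+ * Note (inferred from the callers below): the low 16 bits of the
+ * interrupt register identify the pending source, either CAN_STATUS_INT
+ * (0x8000) for a status interrupt or the number (1..32) of the message
+ * object that raised it; 0 means nothing is pending.
+ */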
+
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
+ if (set == ENABLE)
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+ else
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 *link)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+
+ if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
+ *link = DISABLE;
+ else
+ *link = ENABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_clear_buffers(struct pch_can_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+ iowrite32(0xffff, &priv->regs->if1_mask1);
+ iowrite32(0xffff, &priv->regs->if1_mask2);
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+ iowrite32(0x0, &priv->regs->if1_mcont);
+ iowrite32(0x0, &priv->regs->if1_dataa1);
+ iowrite32(0x0, &priv->regs->if1_dataa2);
+ iowrite32(0x0, &priv->regs->if1_datab1);
+ iowrite32(0x0, &priv->regs->if1_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ }
+
+	for (; i < PCH_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+ iowrite32(0xffff, &priv->regs->if2_mask1);
+ iowrite32(0xffff, &priv->regs->if2_mask2);
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ iowrite32(0x0, &priv->regs->if2_mcont);
+ iowrite32(0x0, &priv->regs->if2_dataa1);
+ iowrite32(0x0, &priv->regs->if2_dataa2);
+ iowrite32(0x0, &priv->regs->if2_datab1);
+ iowrite32(0x0, &priv->regs->if2_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+}
+
+static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_UMASK);
+
+			/* Clear EOB (FIFO mode) for all but the last Rx Obj */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+			/* In FIFO mode, EOB of the last Rx Obj must be set */
+ if (i == (PCH_RX_OBJ_NUM - 1))
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+
+ iowrite32(0, &priv->regs->if1_mask1);
+ pch_can_bit_clear(&priv->regs->if1_mask2,
+ 0x1fff | CAN_MASK2_MDIR_MXTD);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+
+ /* Resetting DIR bit for reception */
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ /* Setting EOB bit for transmitter */
+ iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ iowrite32(0, &priv->regs->if2_mask1);
+ pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_init(struct pch_can_priv *priv)
+{
+	/* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Clearing all the message object buffers. */
+ pch_can_clear_buffers(priv);
+
+	/* Configuring each message object as either an Rx or a Tx object. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Enabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+}
+
+static void pch_can_release(struct pch_can_priv *priv)
+{
+	/* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Disabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+	/* Disabling all the receive objects. */
+ pch_can_rx_disable_all(priv);
+
+	/* Disabling all the transmit objects. */
+ pch_can_tx_disable_all(priv);
+}
+
+/* This function clears interrupt(s) from the CAN device. */
+static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
+{
+ if (mask == CAN_STATUS_INT) {
+ ioread32(&priv->regs->stat);
+ return;
+ }
+
+ /* Clear interrupt for transmit object */
+ if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+		/*
+		 * Setting CMASK for clearing interrupts for
+		 * frame transmission.
+		 */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if2_cmask);
+
+ /* Resetting the ID registers. */
+ pch_can_bit_set(&priv->regs->if2_id2,
+ CAN_ID2_DIR | (0x7ff << 2));
+ iowrite32(0x0, &priv->regs->if2_id1);
+
+		/* Clearing NewDat, TxRqst & IntPnd */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+ pch_can_check_if_busy(&priv->regs->if2_creq, mask);
+ } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ /* Setting CMASK for clearing the reception interrupts. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+ }
+}
+
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+ return (ioread32(&priv->regs->treq1) & 0xffff) |
+ ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+}
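+/*
+ * Interpretation sketch (follows from the shifts above): bit (n - 1) of
+ * the returned value is the TxRqst flag of message object n, so a
+ * result of 0x0C000000 would mean objects 27 and 28 still have pending
+ * transmission requests.
+ */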
+
+static void pch_can_reset(struct pch_can_priv *priv)
+{
+ /* write to sw reset register */
+ iowrite32(1, &priv->regs->srst);
+ iowrite32(0, &priv->regs->srst);
+}
+
+static void pch_can_error(struct net_device *ndev, u32 status)
+{
+ struct sk_buff *skb;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ u32 errc;
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ enum can_state state = priv->can.state;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ if (status & PCH_BUS_OFF) {
+ pch_can_tx_disable_all(priv);
+ pch_can_rx_disable_all(priv);
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(ndev);
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+		dev_err(&ndev->dev, "%s -> Bus Off occurred.\n", __func__);
+ }
+
+ /* Warning interrupt. */
+ if (status & PCH_EWARN) {
+ state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ if ((errc & CAN_TEC) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ dev_warn(&ndev->dev,
+ "%s -> Error Counter is more than 96.\n", __func__);
+ }
+ /* Error passive interrupt. */
+ if (status & PCH_EPASSIV) {
+ priv->can.can_stats.error_passive++;
+ state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if ((errc & CAN_TEC) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ dev_err(&ndev->dev,
+			"%s -> CAN controller is ERROR PASSIVE.\n", __func__);
+ }
+
+ if (status & PCH_LEC_ALL) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ switch (status & PCH_LEC_ALL) {
+ case PCH_STUF_ERR:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case PCH_FORM_ERR:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case PCH_ACK_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ break;
+ case PCH_BIT1_ERR:
+ case PCH_BIT0_ERR:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case PCH_CRC_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ break;
+ default:
+ iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
+ break;
+ }
+
+ }
+
+ priv->can.state = state;
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+{
+ u32 reg;
+ canid_t id;
+ u32 ide;
+ u32 rtr;
+ int i, j, k;
+ int rcv_pkts = 0;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+
+	/* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+
+ /* Reading the MCONT register. */
+ reg = ioread32(&priv->regs->if1_mcont);
+ reg &= 0xffff;
+
+ for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+ /* If MsgLost bit set. */
+ if (reg & CAN_IF_MCONT_MSGLOST) {
+ dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_MSGLOST);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ goto RX_NEXT;
+ }
+ if (!(reg & CAN_IF_MCONT_NEWDAT))
+ goto RX_NEXT;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Get Received data */
+ ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+ if (ide) {
+ id = (ioread32(&priv->regs->if1_id1) & 0xffff);
+ id |= (((ioread32(&priv->regs->if1_id2)) &
+ 0x1fff) << 16);
+ cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ } else {
+ id = (((ioread32(&priv->regs->if1_id2)) &
+ (CAN_SFF_MASK << 2)) >> 2);
+ cf->can_id = (id & CAN_SFF_MASK);
+ }
+
+ rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
+ if (rtr) {
+ cf->can_dlc = 0;
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
+ 0x0f);
+ }
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ reg = ioread32(&priv->regs->if1_dataa1 + j*4);
+ cf->data[i++] = cpu_to_le32(reg & 0xff);
+ if (i == cf->can_dlc)
+ break;
+ cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+ }
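+		/*
+		 * Unpacking example (illustrative): each 32-bit read above
+		 * carries two payload bytes in its low 16 bits, low byte
+		 * first, so a dlc of 3 takes two reads: bytes 0 and 1 from
+		 * the first, byte 2 from the second.
+		 */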
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ if (k < PCH_FIFO_THRESH) {
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
+ CAN_CMASK_ARB, &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_INTPND);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+ } else if (k > PCH_FIFO_THRESH) {
+ pch_can_int_clr(priv, k);
+ } else if (k == PCH_FIFO_THRESH) {
+ int cnt;
+ for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+ pch_can_int_clr(priv, cnt+1);
+ }
+RX_NEXT:
+		/* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
+ reg = ioread32(&priv->regs->if1_mcont);
+ }
+
+ return rcv_pkts;
+}
+static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ u32 dlc;
+ u32 int_stat;
+ int rcv_pkts = 0;
+ u32 reg_stat;
+ unsigned long flags;
+
+ int_stat = pch_can_int_pending(priv);
+ if (!int_stat)
+ return 0;
+
+INT_STAT:
+ if (int_stat == CAN_STATUS_INT) {
+ reg_stat = ioread32(&priv->regs->stat);
+ if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
+ if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
+ pch_can_error(ndev, reg_stat);
+ }
+
+ if (reg_stat & PCH_TX_OK) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq,
+ ioread32(&priv->regs->intr));
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+ }
+
+ if (reg_stat & PCH_RX_OK)
+ pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ }
+
+MSG_OBJ:
+ if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ rcv_pkts = pch_can_rx_normal(ndev, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (rcv_pkts < 0)
+ return 0;
+ } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
+ if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+ /* Handle transmission interrupt */
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+ &priv->regs->if2_cmask);
+ dlc = ioread32(&priv->regs->if2_mcont) &
+ CAN_IF_MCONT_DLC;
+ pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (dlc > 8)
+ dlc = 8;
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ }
+ }
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ else if (int_stat >= 1 && int_stat <= 32)
+ goto MSG_OBJ;
+
+ napi_complete(napi);
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+
+ return rcv_pkts;
+}
+
+static int pch_set_bittiming(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 canbit;
+ u32 bepe;
+ u32 brp;
+
+ /* Setting the CCE bit for accessing the Can Timing register. */
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+
+ brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
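+	/*
+	 * Worked example (follows directly from the line above): with
+	 * PCH_CAN_CLK at 50 MHz one CAN clock is 20 ns, so a requested
+	 * time quantum of 500 ns gives brp = 500 / 20 - 1 = 24.
+	 */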
+ canbit = brp & MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
+ canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
+ bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ iowrite32(canbit, &priv->regs->bitt);
+ iowrite32(bepe, &priv->regs->brpe);
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+
+ return 0;
+}
+
+static void pch_can_start(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ if (priv->can.state != CAN_STATE_STOPPED)
+ pch_can_reset(priv);
+
+ pch_set_bittiming(ndev);
+ pch_can_set_optmode(priv);
+
+ pch_can_tx_enable_all(priv);
+ pch_can_rx_enable_all(priv);
+
+ /* Setting the CAN to run mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return;
+}
+
+static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ pch_can_start(ndev);
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int pch_can_open(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ int retval;
+
+ retval = pci_enable_msi(priv->dev);
+ if (retval) {
+ dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
+ priv->use_msi = 0;
+ } else {
+ dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
+ priv->use_msi = 1;
+ }
+
+	/* Registering the interrupt. */
+ retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
+ if (retval) {
+ dev_err(&ndev->dev, "request_irq failed.\n");
+ goto req_irq_err;
+ }
+
+ /* Open common can device */
+ retval = open_candev(ndev);
+ if (retval) {
+ dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+ goto err_open_candev;
+ }
+
+ pch_can_init(priv);
+ pch_can_start(ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_open_candev:
+ free_irq(priv->dev->irq, ndev);
+req_irq_err:
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+
+ pch_can_release(priv);
+
+ return retval;
+}
+
+static int pch_close(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ pch_can_release(priv);
+ free_irq(priv->dev->irq, ndev);
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+ close_candev(ndev);
+ priv->can.state = CAN_STATE_STOPPED;
+ return 0;
+}
+
+static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
+{
+ u32 buffer_status = 0;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ /* Getting the message object status. */
+ buffer_status = (u32) pch_can_get_buffer_status(priv);
+
+ return buffer_status & obj_id;
+}
+
+static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int i, j;
+ unsigned long flags;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ int tx_buffer_avail = 0;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
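+	/* TX message objects are used round-robin; once past the tail,
+	 * wait for all pending transmissions to drain before wrapping
+	 * back to the first TX object. */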
+ if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
+ while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
+ PCH_RX_OBJ_NUM)))
+ udelay(500);
+
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
+ tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
+ } else {
+ tx_buffer_avail = priv->tx_obj;
+ }
+ priv->tx_obj++;
+
+	/* Acquire the message-interface register lock. */
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ /* Reading the Msg Obj from the Msg RAM to the Interface register. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ /* Setting the CMASK register. */
+ pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+
+	/* Program the frame identifier (standard or extended). */
+ pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
+ pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ } else {
+ pch_can_bit_set(&priv->regs->if2_id1, 0);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ (cf->can_id & CAN_SFF_MASK) << 2);
+ }
+
+	/* If a remote frame has to be transmitted, clear the direction bit. */
+ if (cf->can_id & CAN_RTR_FLAG)
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+
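+	/*
+	 * Copy the payload into the 16-bit interface data registers, two
+	 * bytes per write: the even byte goes in the low half and the odd
+	 * byte is shifted into the high half. Writing both halves in a
+	 * single store keeps the second byte from clobbering the first.
+	 */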
+	for (i = 0, j = 0; i < cf->can_dlc; i += 2, j++) {
+		u16 data = cf->data[i];
+
+		if (i + 1 < cf->can_dlc)
+			data |= cf->data[i + 1] << 8;
+		iowrite32(data, (&priv->regs->if2_dataa1) + j*4);
+	}
+
+ can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+
+ /* Updating the size of the data. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
+ pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+
+ /* Clearing IntPend, NewDat & TxRqst */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+
+ /* Setting NewDat, TxRqst bits */
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops pch_can_netdev_ops = {
+ .ndo_open = pch_can_open,
+ .ndo_stop = pch_close,
+ .ndo_start_xmit = pch_xmit,
+};
+
+static void __devexit pch_can_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(priv->ndev);
+ free_candev(priv->ndev);
+ pci_iounmap(pdev, priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pch_can_reset(priv);
+}
+
+#ifdef CONFIG_PM
+static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return value. */
+ u32 buf_stat; /* Variable for reading the transmit buffer status. */
+ u32 counter = 0xFFFFFF;
+
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ /* Stop the CAN controller */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+	/* Indicate that we are about to enter suspend */
+ priv->can.state = CAN_STATE_SLEEPING;
+
+	/* Wait for all transmissions to complete. */
+ while (counter) {
+ buf_stat = pch_can_get_buffer_status(priv);
+ if (!buf_stat)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+
+ /* Save interrupt configuration and then disable them */
+ pch_can_get_int_enables(priv, &(priv->int_enables));
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Save Tx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_get_tx_enable(priv, i + 1,
+ &(priv->tx_enable[i]));
+ }
+
+ /* Disable all Transmit buffers */
+ pch_can_tx_disable_all(priv);
+
+ /* Save Rx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ pch_can_get_rx_enable(priv, i + 1,
+ &(priv->rx_enable[i]));
+ pch_can_get_rx_buffer_link(priv, i + 1,
+ &(priv->rx_link[i]));
+ }
+ }
+
+ /* Disable all Receive buffers */
+ pch_can_rx_disable_all(priv);
+ retval = pci_save_state(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_save_state failed.\n");
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
+
+ return retval;
+}
+
+static int pch_can_resume(struct pci_dev *pdev)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return variable. */
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed.\n");
+ return retval;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Disabling all interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Setting the CAN device in Stop Mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Configuring the transmit and receive buffers. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Restore the CAN state */
+ pch_set_bittiming(dev);
+
+ /* Listen/Active */
+ pch_can_set_optmode(priv);
+
+ /* Enabling the transmit buffer. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ pch_can_set_tx_enable(priv, i + 1,
+ priv->tx_enable[i]);
+ }
+ }
+
+ /* Configuring the receive buffer and enabling them. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ /* Restore buffer link */
+ pch_can_set_rx_buffer_link(priv, i + 1,
+ priv->rx_link[i]);
+
+ /* Restore buffer enables */
+ pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
+ }
+ }
+
+ /* Enable CAN Interrupts */
+ pch_can_set_int_custom(priv);
+
+ /* Restore Run Mode */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ return retval;
+}
+#else
+#define pch_can_suspend NULL
+#define pch_can_resume NULL
+#endif
+
+static int pch_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
+ bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+
+ return 0;
+}
+
+static int __devinit pch_can_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ struct pch_can_priv *priv;
+ int rc;
+ int index;
+ void __iomem *addr;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
+ goto probe_exit_endev;
+ }
+
+ rc = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
+ goto probe_exit_pcireq;
+ }
+
+ addr = pci_iomap(pdev, 1, 0);
+ if (!addr) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "Failed pci_iomap\n");
+ goto probe_exit_ipmap;
+ }
+
+ ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+ if (!ndev) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed alloc_candev\n");
+ goto probe_exit_alloc_candev;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->dev = pdev;
+ priv->can.bittiming_const = &pch_can_bittiming_const;
+ priv->can.do_set_mode = pch_can_do_set_mode;
+ priv->can.do_get_berr_counter = pch_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_LOOPBACK;
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+
+ ndev->irq = pdev->irq;
+ ndev->flags |= IFF_ECHO;
+
+ pci_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &pch_can_netdev_ops;
+
+ priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
+ for (index = 0; index < PCH_RX_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_RX;
+
+	for (; index < PCH_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_TX;
+
+ netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+
+ rc = register_candev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
+ goto probe_exit_reg_candev;
+ }
+
+ return 0;
+
+probe_exit_reg_candev:
+ free_candev(ndev);
+probe_exit_alloc_candev:
+ pci_iounmap(pdev, addr);
+probe_exit_ipmap:
+ pci_release_regions(pdev);
+probe_exit_pcireq:
+ pci_disable_device(pdev);
+probe_exit_endev:
+ return rc;
+}
+
+static struct pci_driver pch_can_pci_driver = {
+ .name = "pch_can",
+ .id_table = pch_pci_tbl,
+ .probe = pch_can_probe,
+ .remove = __devexit_p(pch_can_remove),
+ .suspend = pch_can_suspend,
+ .resume = pch_can_resume,
+};
+
+static int __init pch_can_pci_init(void)
+{
+ return pci_register_driver(&pch_can_pci_driver);
+}
+module_init(pch_can_pci_init);
+
+static void __exit pch_can_pci_exit(void)
+{
+ pci_unregister_driver(&pch_can_pci_driver);
+}
+module_exit(pch_can_pci_exit);
+
+MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ae3505a..6fdc031 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -58,4 +58,16 @@
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+ tristate "TS-CAN1 PC104 boards"
+ depends on ISA
+ help
+	  This driver is for Technologic Systems' TS-CAN1 PC104 boards.
+ http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ The driver supports multiple boards and automatically configures them:
+ PLD IO base addresses are read from jumpers JP1 and JP2,
+ IRQ numbers are read from jumpers JP4 and JP5,
+ SJA1000 IO base addresses are chosen heuristically (first that works).
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index ce92455..2c591eb 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,5 +9,6 @@
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644
index 0000000..9756099
--- /dev/null
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -0,0 +1,216 @@
+/*
+ * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
+ *
+ * Copyright 2010 Andre B. Oliveira
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * References:
+ * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
+ * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "sja1000.h"
+
+MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
+MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
+#define TSCAN1_MAXDEV 4
+
+/* PLD registers address offsets */
+#define TSCAN1_ID1 0
+#define TSCAN1_ID2 1
+#define TSCAN1_VERSION 2
+#define TSCAN1_LED 3
+#define TSCAN1_PAGE 4
+#define TSCAN1_MODE 5
+#define TSCAN1_JUMPERS 6
+
+/* PLD board identifier registers magic values */
+#define TSCAN1_ID1_VALUE 0xf6
+#define TSCAN1_ID2_VALUE 0xb9
+
+/* PLD mode register SJA1000 IO enable bit */
+#define TSCAN1_MODE_ENABLE 0x40
+
+/* PLD jumpers register bits */
+#define TSCAN1_JP4 0x10
+#define TSCAN1_JP5 0x20
+
+/* PLD IO base addresses start */
+#define TSCAN1_PLD_ADDRESS 0x150
+
+/* PLD register space size */
+#define TSCAN1_PLD_SIZE 8
+
+/* SJA1000 register space size */
+#define TSCAN1_SJA1000_SIZE 32
+
+/* SJA1000 crystal frequency (16MHz) */
+#define TSCAN1_SJA1000_XTAL 16000000
+
+/* SJA1000 IO base addresses */
+static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+ 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
+};
+
+/* Read SJA1000 register */
+static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write SJA1000 register */
+static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
+static int __devinit tscan1_probe(struct device *dev, unsigned id)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+ int irq, i;
+
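+	/* The JP1:JP2 jumper id selects one of TSCAN1_MAXDEV consecutive
+	 * 8-byte PLD register windows starting at IO address 0x150. */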
+ pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
+ if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
+ return -EBUSY;
+
+ if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
+ inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENODEV;
+ }
+
+ switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
+ case TSCAN1_JP4:
+ irq = 6;
+ break;
+ case TSCAN1_JP5:
+ irq = 7;
+ break;
+ case TSCAN1_JP4 | TSCAN1_JP5:
+ irq = 5;
+ break;
+ default:
+ dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -EINVAL;
+ }
+
+ netdev = alloc_sja1000dev(0);
+ if (!netdev) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ netdev->base_addr = pld_base;
+ netdev->irq = irq;
+
+ priv = netdev_priv(netdev);
+ priv->read_reg = tscan1_read;
+ priv->write_reg = tscan1_write;
+ priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
+ priv->cdr = CDR_CBP | CDR_CLK_OFF;
+ priv->ocr = OCR_TX0_PUSHPULL;
+
+ /* Select the first SJA1000 IO address that is free and that works */
+ for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
+ sja1000_base = tscan1_sja1000_addresses[i];
+ if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
+ dev_name(dev)))
+ continue;
+
+ /* Set SJA1000 IO base address and enable it */
+ outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
+
+ priv->reg_base = (void __iomem *)sja1000_base;
+ if (!register_sja1000dev(netdev)) {
+ /* SJA1000 probe succeeded; turn LED off and return */
+ outb(0, pld_base + TSCAN1_LED);
+ netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
+ pld_base, sja1000_base, irq);
+ return 0;
+ }
+
+ /* SJA1000 probe failed; release and try next address */
+ outb(0, pld_base + TSCAN1_MODE);
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ }
+
+ dev_err(dev, "failed to assign SJA1000 IO address\n");
+ dev_set_drvdata(dev, NULL);
+ free_sja1000dev(netdev);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENXIO;
+}
+
+static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+
+ netdev = dev_get_drvdata(dev);
+ unregister_sja1000dev(netdev);
+ dev_set_drvdata(dev, NULL);
+
+ priv = netdev_priv(netdev);
+ pld_base = netdev->base_addr;
+ sja1000_base = (unsigned long)priv->reg_base;
+
+ outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
+
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+
+ free_sja1000dev(netdev);
+
+ return 0;
+}
+
+static struct isa_driver tscan1_isa_driver = {
+ .probe = tscan1_probe,
+ .remove = __devexit_p(tscan1_remove),
+ .driver = {
+ .name = "tscan1",
+ },
+};
+
+static int __init tscan1_init(void)
+{
+ return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
+}
+module_init(tscan1_init);
+
+static void __exit tscan1_exit(void)
+{
+ isa_unregister_driver(&tscan1_isa_driver);
+}
+module_exit(tscan1_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index a04ce6a..046d846 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1266,11 +1266,13 @@
}
if (!(adap->flags & QUEUES_BOUND)) {
- err = bind_qsets(adap);
- if (err) {
- CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+ int ret = bind_qsets(adap);
+
+ if (ret < 0) {
+ CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
free_irq_resources(adap);
+ err = ret;
goto out;
}
adap->flags |= QUEUES_BOUND;
@@ -3299,7 +3301,6 @@
pi->rx_offload = T3_RX_CSUM | T3_LRO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 5d72bda..f9f6645 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -296,8 +296,10 @@
if (d->skb) { /* an SGL is present */
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
- if (d->eop)
+ if (d->eop) {
kfree_skb(d->skb);
+ d->skb = NULL;
+ }
}
++d;
if (++cidx == q->size) {
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index eaa49e4..3d4253d3 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -281,7 +281,6 @@
struct port_info {
struct adapter *adapter;
- struct vlan_group *vlan_grp;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 87054e0..f50bc98 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -403,7 +403,7 @@
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- pi->vlan_grp != NULL, true);
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@
static int set_flags(struct net_device *dev, u32 flags)
{
- return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+ int err;
+ unsigned long old_feat = dev->features;
+
+ err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+ if (err)
+ return err;
+
+ if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+ const struct port_info *pi = netdev_priv(dev);
+
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+ true);
+ if (err)
+ dev->features = old_feat;
+ }
+ return err;
}
static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@
return 0;
}
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct port_info *pi = netdev_priv(dev);
-
- pi->vlan_grp = grp;
- t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
- grp != NULL, true);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
@@ -2878,7 +2886,6 @@
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
- .ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
@@ -3658,7 +3665,6 @@
pi->rx_offload = RX_CSO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 9967f3d..17022258 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1530,18 +1530,11 @@
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
- struct port_info *pi = netdev_priv(rxq->rspq.netdev);
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp)) {
- ret = vlan_gro_frags(&rxq->rspq.napi, grp,
- ntohs(pkt->vlan));
- goto stats;
- }
}
ret = napi_gro_frags(&rxq->rspq.napi);
-stats: if (ret == GRO_HELD)
+ if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp))
- vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
- else
- dev_kfree_skb_any(skb);
- } else
- netif_receive_skb(skb);
-
+ }
+ netif_receive_skb(skb);
return 0;
}
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5..6de5e2e 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2600,7 +2600,6 @@
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a117f2a..4686c39 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -521,7 +521,7 @@
e1000_clean_all_rx_rings(adapter);
}
-void e1000_reinit_safe(struct e1000_adapter *adapter)
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index ca663f1..7236f1a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,10 @@
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
@@ -1243,6 +1247,39 @@
}
/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ bool phy_hung = false;
+ s32 ret_val = 0;
+
+ /*
+	 * Read the PHY Receive Error counter first; if it is at its maximum
+	 * (all F's), read the Base1000T status register. If the idle error
+	 * count there is also at its maximum, the PHY is hung.
+ */
+ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+ if (ret_val)
+ goto out;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+ if (ret_val)
+ goto out;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ phy_hung = true;
+ }
+out:
+ return phy_hung;
+}
+
+/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1859,6 +1896,7 @@
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_CHECK_PHY_HANG,
.pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cee882d..fdc67fe 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -397,6 +397,7 @@
struct work_struct print_hang_task;
bool idle_check;
+ int phy_hang_count;
};
struct e1000_info {
@@ -454,6 +455,7 @@
#define FLAG2_HAS_EEE (1 << 5)
#define FLAG2_DMA_BURST (1 << 6)
#define FLAG2_DISABLE_AIM (1 << 8)
+#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -631,6 +633,7 @@
extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ec8cf3f..c4ca162 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4098,6 +4098,25 @@
}
}
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+	 * With 82574 controllers, the PHY needs to be checked periodically
+	 * for a hung state, and reset once two consecutive checks report it.
+ */
+ if (e1000_check_phy_82574(hw))
+ adapter->phy_hang_count++;
+ else
+ adapter->phy_hang_count = 0;
+
+ if (adapter->phy_hang_count > 1) {
+ adapter->phy_hang_count = 0;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
/**
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -4333,6 +4352,9 @@
if (e1000e_get_laa_state_82571(hw))
e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+ if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+ e1000e_check_82574_phy_workaround(adapter);
+
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4860,8 +4882,11 @@
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
- e1000e_dump(adapter);
- e_err("Reset adapter\n");
+ if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
+ }
e1000e_reinit_locked(adapter);
}
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6..8e745e7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
+ u64 tx_bytes;
u64 rx_packets;
+ u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306..182b2a7 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
- u64 hret, rx_packets, tx_packets;
+ u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
tx_packets = 0;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
- stats->rx_bytes = cb2->rxo;
- stats->tx_bytes = cb2->txo;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
@@ -703,6 +707,7 @@
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
+ u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@
processed_rq3++;
}
+ processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
+ pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
+ u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
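+	/* Preserve the per-queue counters across the memset below so a
+	 * port reset does not zero the interface statistics. */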
+ tx_bytes = pr->tx_bytes;
+ tx_packets = pr->tx_packets;
+ rx_bytes = pr->rx_bytes;
+ rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
+	pr->tx_bytes = tx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2267,14 @@
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
+ if (vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ pr->tx_packets++;
+ pr->tx_bytes += skb->len;
+
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2305,6 @@
}
pr->swqe_id_counter += 1;
- if (vlan_tx_tag_present(skb)) {
- swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
- }
-
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2316,6 @@
}
ehea_post_swqe(pr->qp, swqe);
- pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4c4cc80..49e4ce1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2511,7 +2511,7 @@
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else
dev_kfree_skb_any(skb);
@@ -2594,7 +2594,7 @@
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- skb = __skb_dequeue(&priv->rx_recycle);
+ skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = gfar_alloc_skb(dev);
@@ -2750,7 +2750,7 @@
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc32..06bb9b7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@
SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 14db09e..892d196 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4107,7 +4107,6 @@
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
- struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
int tso = 0, count;
u32 tx_flags = 0;
u16 first;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ebfaa68..28af019 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2783,15 +2783,15 @@
/* reset the hardware with the new settings */
igbvf_reset(adapter);
- /* tell the stack to leave us alone until igbvf_open() is called */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_hw_init;
+ /* tell the stack to leave us alone until igbvf_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
igbvf_print_device_info(adapter);
igbvf_initialize_last_counter_stats(adapter);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 666207a..caa8192 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -533,6 +533,7 @@
pci_release_regions(pdev);
free_netdev(netdev);
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 8bb9ddb..0d44c64 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -43,9 +43,12 @@
* ixgbe_dcb_check_config().
*/
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
- u8 direction)
+ int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
+ int min_credit;
+ int min_multiplier;
+ int min_percent = 100;
s32 ret_val = 0;
/* Initialization values default for Tx settings */
u32 credit_refill = 0;
@@ -59,6 +62,31 @@
goto out;
}
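+	/*
+	 * Minimum refill credit: half the max frame, rounded up to a whole
+	 * number of 64-byte credit quanta (ceiling division).
+	 */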
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+	 * The ratio between traffic classes controls the bandwidth
+	 * percentages seen on the wire. To preserve that ratio we scale the
+	 * link percentages by a common multiplier, choosing the smallest
+	 * multiplier that lifts every non-zero class's refill credit above
+	 * min_credit, so each class can always refill a max-sized frame.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
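+	/*
+	 * Illustrative numbers (not from the datasheet): a 1518-byte max
+	 * frame gives min_credit = DIV_ROUND_UP(759, 64) = 12; if the
+	 * smallest non-zero link percentage is 1, min_multiplier becomes
+	 * 13, so even that class's refill credit exceeds min_credit.
+	 */
+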
/* Find out the link percentage for each TC first */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
@@ -73,8 +101,9 @@
/* Save link_percentage for reference */
p->link_percent = (u8)link_percentage;
- /* Calculate credit refill and save it */
- credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ MAX_CREDIT_REFILL);
p->data_credits_refill = (u16)credit_refill;
/* Calculate maximum credit for the TC */
@@ -85,8 +114,8 @@
* of a TC is too small, the maximum credit may not be
* enough to send out a jumbo frame in data plane arbitration.
*/
- if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
- credit_max = MINIMUM_CREDIT_FOR_JUMBO;
+ if (credit_max && (credit_max < min_credit))
+ credit_max = min_credit;
if (direction == DCB_TX_CONFIG) {
/*
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index eb1059f..0208a87 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,15 +150,14 @@
/* DCB driver APIs */
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
+#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
-#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
-#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 67c219f..05f2247 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -397,6 +397,11 @@
reg &= ~IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 18d7fbf..3841649 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -95,6 +95,9 @@
#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
+
/* DCB hardware-specific driver APIs */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f856312..2bd3eb4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3347,6 +3347,7 @@
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
u32 txdctl;
int i, j;
@@ -3359,8 +3360,15 @@
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+#ifdef CONFIG_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ DCB_RX_CONFIG);
/* reconfigure the hardware */
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d7a975e..c57d9a4 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1623,12 +1623,12 @@
return rc;
}
-#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
u32 bmcr, tmp;
+ jme_phy_on(jme);
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@
phylink = jme_linkstat_from_phy(jme);
}
}
-#endif
static inline void
jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+ if (jme->reg_pmcs) {
+ jme_set_100m_half(jme);
+
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ jme_wait_link(jme);
+
+ jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ } else {
+ jme_phy_off(jme);
+ }
+}
+
static int
jme_close(struct net_device *netdev)
{
@@ -2941,11 +2955,7 @@
* Tell stack that we are not ready to work until open()
*/
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- /*
- * Register netdev
- */
rc = register_netdev(netdev);
if (rc) {
pr_err("Cannot register net device\n");
@@ -2991,6 +3001,16 @@
}
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme_powersave_phy(jme);
+ pci_pme_active(pdev, true);
+}
+
#ifdef CONFIG_PM
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3048,9 @@
tasklet_hi_enable(&jme->rxempty_task);
pci_save_state(pdev);
- if (jme->reg_pmcs) {
- jme_set_100m_half(jme);
-
- if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
- jme_wait_link(jme);
-
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
- pci_enable_wake(pdev, PCI_D3cold, true);
- } else {
- jme_phy_off(jme);
- }
- pci_set_power_state(pdev, PCI_D3cold);
+ jme_powersave_phy(jme);
+ pci_enable_wake(jme->pdev, PCI_D3hot, true);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -3087,6 +3097,7 @@
.suspend = jme_suspend,
.resume = jme_resume,
#endif /* CONFIG_PM */
+ .shutdown = jme_shutdown,
};
static int __init
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 316bb70..e7030ce 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -1077,7 +1077,6 @@
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- netif_start_queue(dev);
ei_local->tx1 = ei_local->tx2 = 0;
ei_local->txing = 0;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 4297f6e..f69e73e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -515,14 +515,15 @@
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
- if (work_done < budget)
+ if (work_done < budget) {
napi_complete(napi);
- /*
- * We've done what we can to clean the buffers. Make sure we
- * get notified when new packets arrive.
- */
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ /*
+ * We've done what we can to clean the buffers. Make sure we
+ * get notified when new packets arrive.
+ */
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
/* TODO: Handle errors */
@@ -550,12 +551,16 @@
}
if (status & MACB_RX_INT_FLAGS) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
if (napi_schedule_prep(&bp->napi)) {
- /*
- * There's no point taking any more interrupts
- * until we have processed the buffers
- */
- macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
__napi_schedule(&bp->napi);
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4de..02393fd 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
- struct mlx4_cmd_mailbox *mailbox;
- __be64 *inbox;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- inbox = mailbox->buf;
-
- inbox[0] = cpu_to_be64(virt);
- inbox[1] = cpu_to_be64(dma_addr);
-
- err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
-
- if (!err)
- mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
- (unsigned long long) dma_addr, (unsigned long long) virt);
-
- return err;
-}
-
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f..b10c07a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58..8674ad5 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -111,6 +111,12 @@
goto out;
}
}
+
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -205,6 +211,11 @@
}
}
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 1261212..f7d06cb 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -255,19 +255,6 @@
}
static void
-nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
-{
-
- netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
- adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
- NX_CDRP_CMD_DESTROY_RX_CTX);
-
- netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
- adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
- NX_CDRP_CMD_DESTROY_TX_CTX);
-}
-
-static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -698,8 +685,6 @@
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
goto done;
- if (reset_devices)
- nx_fw_cmd_reset_ctx(adapter);
err = nx_fw_cmd_create_rx_ctx(adapter);
if (err)
goto err_out_free;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 50820bea..e1d30d7 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
@@ -1240,7 +1237,6 @@
dev_warn(&pdev->dev, "failed to read mac addr\n");
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1356,6 +1352,13 @@
break;
}
+ if (reset_devices) {
+ if (adapter->portnum == 0) {
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+ adapter->need_fw_reset = 1;
+ }
+ }
+
err = netxen_start_firmware(adapter);
if (err)
goto err_out_decr_ref;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 03096c8..d05c446 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@
PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
+ PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e2afdce..f0bd1a1 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -74,8 +74,8 @@
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
-#define MII_88EC048_PHY_MSCR1_REG 16
-#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6)
+#define MII_88E1318S_PHY_MSCR1_REG 16
+#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
@@ -240,7 +240,7 @@
return err;
}
-static int m88ec048_config_aneg(struct phy_device *phydev)
+static int m88e1318_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
@@ -251,10 +251,10 @@
if (err < 0)
return err;
- mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
- mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
+ mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
+ mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+ err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
if (err < 0)
return err;
@@ -659,12 +659,12 @@
.driver = { .owner = THIS_MODULE },
},
{
- .phy_id = MARVELL_PHY_ID_88EC048,
+ .phy_id = MARVELL_PHY_ID_88E1318S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88EC048",
+ .name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &m88ec048_config_aneg,
+ .config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1bb16cb7..7670aac 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -65,7 +65,7 @@
*
* Returns 0 on success on < 0 on error.
*/
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
{
int err = 0;
@@ -82,7 +82,7 @@
*
* Returns 0 on success on < 0 on error.
*/
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
int err = 0;
@@ -208,7 +208,7 @@
* duplexes. Drop down by one in this order: 1000/FULL,
* 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
*/
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
int idx;
@@ -223,7 +223,6 @@
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
}
-EXPORT_SYMBOL(phy_sanitize_settings);
/**
* phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
{
int err;
@@ -545,13 +544,12 @@
return err;
}
-EXPORT_SYMBOL(phy_enable_interrupts);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
@@ -574,7 +572,6 @@
return err;
}
-EXPORT_SYMBOL(phy_disable_interrupts);
/**
* phy_start_interrupts - request and enable interrupts for a PHY device
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16ddc77..993c52c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -57,6 +57,9 @@
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
+
/*
* Creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@
}
EXPORT_SYMBOL(phy_scan_fixups);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+ int addr, int phy_id)
{
struct phy_device *dev;
@@ -193,7 +197,6 @@
return dev;
}
-EXPORT_SYMBOL(phy_device_create);
/**
* get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@
* If you want to monitor your own link state, don't call
* this function.
*/
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
@@ -435,8 +438,8 @@
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
@@ -473,7 +476,6 @@
* (dev_flags and interface) */
return phy_init_hw(phydev);
}
-EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
int oldadv, adv;
@@ -605,7 +607,6 @@
return changed;
}
-EXPORT_SYMBOL(genphy_config_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 26c37d3..8ecc170 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -146,11 +146,13 @@
#define MAX_CMD_DESCRIPTORS 1024
#define MAX_RCV_DESCRIPTORS_1G 4096
#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
@@ -942,6 +944,7 @@
#define QLCNIC_LOOPBACK_TEST 2
#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
struct qlcnic_filter {
@@ -970,6 +973,8 @@
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
@@ -1129,7 +1134,7 @@
#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
-#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 25e93a5..ec21d24 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -437,14 +437,8 @@
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
- } else {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
- u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
u16 num_rxd, num_jumbo_rxd, num_txd;
-
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
- max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
- MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
- MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f047c7c..a3dcd04 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -656,13 +656,23 @@
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
-
if (adapter->ahw.port_type == QLCNIC_XGBE) {
- adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
} else if (adapter->ahw.port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
@@ -1440,7 +1450,6 @@
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1860,6 +1869,11 @@
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
+
+ if (jiffies >
+ (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, src_addr, vlan_id,
+ tx_ring);
tmp_fil->ftime = jiffies;
return;
}
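
The qlcnic hunk above re-programs a learned MAC filter into hardware once its last-seen stamp is older than QLCNIC_READD_AGE seconds. A minimal sketch of the aging test, assuming qlcnic_change_filter() has the signature implied by the hunk; note that the kernel's time_after() helper expresses the same comparison in a jiffies-wraparound-safe form:

    /* Sketch: refresh a learned filter that has aged past the re-add window. */
    static void sketch_refresh_filter(struct qlcnic_adapter *adapter,
                                      struct qlcnic_filter *fil, u64 src_addr,
                                      u16 vlan_id,
                                      struct qlcnic_host_tx_ring *tx_ring)
    {
            /* Equivalent to the raw '>' in the patch, but wrap-safe. */
            if (time_after(jiffies, fil->ftime + QLCNIC_READD_AGE * HZ))
                    qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);

            fil->ftime = jiffies;   /* refresh the last-seen stamp either way */
    }
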
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786..2282139 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2226,7 +2226,6 @@
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
-#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index ba0053d..c30e0fe 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -94,6 +94,9 @@
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@
}
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+ qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+ if (qdev->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(qdev->vlgrp, vid))
+ continue;
+ qlge_vlan_rx_add_vid(qdev->ndev, vid);
+ }
+ }
+}
+
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
@@ -3842,7 +3859,7 @@
"MAC address %pM\n", ndev->dev_addr);
}
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
+ /* Restore vlan setting. */
+ qlge_restore_vlan(qdev);
+
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@
return &ndev->stats;
}
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct netdev_hw_addr *ha;
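
The new qlge_restore_vlan() above exists because the adapter's VLAN filter table is volatile across resets: any VLAN ID the stack still holds must be replayed into hardware after the device comes back up. Condensed, with comments (APIs as used in the hunk):

    static void sketch_restore_vlan(struct ql_adapter *qdev)
    {
            u16 vid;

            /* Re-attach the group first so the hw VLAN context is valid. */
            qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);

            if (!qdev->vlgrp)
                    return;

            /* A non-NULL device for a VID means it was registered before
             * the reset and must be re-programmed into the filter table. */
            for (vid = 0; vid < VLAN_N_VID; vid++)
                    if (vlan_group_get_device(qdev->vlgrp, vid))
                            qlge_vlan_rx_add_vid(qdev->ndev, vid);
    }

It is called from the adapter-up path right after qlge_set_multicast_list(), as the @@ -3957 hunk shows.
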
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e857..0e7c7c7 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@
return status;
}
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@
/* Send an ACK mailbox command to the firmware to
* let it continue with the change.
*/
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@
return status;
}
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9f..4c4d169 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
@@ -2931,7 +2931,7 @@
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4588,7 +4588,8 @@
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index a9ae505..66c2f1a 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -961,9 +961,9 @@
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[0] = 0x48;
- lp->rx_session_id[0] = 0x44;
- lp->rx_session_id[0] = 0x42;
+ lp->rx_session_id[1] = 0x48;
+ lp->rx_session_id[2] = 0x44;
+ lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
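
The sb1000 hunk is a straight copy-paste fix: all four session-ID bytes were stored to index 0, leaving rx_session_id[1..3] uninitialized. A table-driven initializer avoids that class of bug entirely (a sketch; the array name is illustrative):

    static const unsigned char sb1000_session_ids[4] = { 0x50, 0x48, 0x44, 0x42 };

    /* One copy, no per-index literals to fall out of sync. */
    memcpy(lp->rx_session_id, sb1000_session_ids, sizeof(sb1000_session_ids));
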
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9265315..3a0cc63 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -531,7 +531,7 @@
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
- err = -EAGAIN;
+ return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0..220e039 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@
/* device is off until link detection */
netif_carrier_off(dev);
- netif_stop_queue(dev);
return dev;
}
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ac279fa..ab9e3b7 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -688,18 +688,8 @@
return 0;
}
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
#else /* CONFIG_INET */
-
int
slhc_toss(struct slcompress *comp)
{
@@ -738,6 +728,10 @@
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
-#endif /* CONFIG_INET */
MODULE_LICENSE("Dual BSD/GPL");
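
The slhc hunk moves the EXPORT_SYMBOL() block below the #endif so that one export list covers both the real CONFIG_INET implementations and the non-INET stubs, instead of duplicating it per branch. The shape of the pattern, reduced to a single symbol:

    #ifdef CONFIG_INET
    struct slcompress *slhc_init(int rslots, int tslots)
    {
            /* real VJ header-compression setup lives here */
    }
    #else
    struct slcompress *slhc_init(int rslots, int tslots)
    {
            printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
            return NULL;    /* stub */
    }
    #endif /* CONFIG_INET */

    /* One export covers whichever definition was compiled in. */
    EXPORT_SYMBOL(slhc_init);
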
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 016360c..8a79585 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
#define __SMSC911X_H__
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
-#define SMSC911X_EEPROM_SIZE ((u32)7)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
/* This is the maximum number of packets to be received every
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 823b9e6..06bc603 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -337,33 +337,19 @@
return 0;
}
-static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
- value |= MAC_RNABLE_RX;
- /* Set the RE (receive enable bit into the MAC CTRL register). */
+
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
- value |= MAC_ENABLE_TX;
- /* Set the TE (transmit enable bit into the MAC CTRL register). */
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
- value &= ~MAC_RNABLE_RX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
- value &= ~MAC_ENABLE_TX;
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
writel(value, ioaddr + MAC_CTRL_REG);
}
@@ -857,8 +843,7 @@
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
/* Enable the MAC Rx/Tx */
- stmmac_mac_enable_rx(priv->ioaddr);
- stmmac_mac_enable_tx(priv->ioaddr);
+ stmmac_enable_mac(priv->ioaddr);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -928,9 +913,8 @@
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
- /* Disable the MAC core */
- stmmac_mac_disable_tx(priv->ioaddr);
- stmmac_mac_disable_rx(priv->ioaddr);
+ /* Disable the MAC Rx/Tx */
+ stmmac_disable_mac(priv->ioaddr);
netif_carrier_off(dev);
@@ -1787,8 +1771,7 @@
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_mac_disable_rx(priv->ioaddr);
- stmmac_mac_disable_tx(priv->ioaddr);
+ stmmac_disable_mac(priv->ioaddr);
netif_carrier_off(ndev);
@@ -1839,13 +1822,11 @@
dis_ic);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
- stmmac_mac_disable_tx(priv->ioaddr);
-
/* Enable Power down mode by programming the PMT regs */
if (device_can_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_mac_disable_rx(priv->ioaddr);
+ stmmac_disable_mac(priv->ioaddr);
} else {
priv->shutdown = 1;
/* Although this can appear slightly redundant it actually
@@ -1886,8 +1867,7 @@
netif_device_attach(dev);
/* Enable the MAC and DMA */
- stmmac_mac_enable_rx(priv->ioaddr);
- stmmac_mac_enable_tx(priv->ioaddr);
+ stmmac_enable_mac(priv->ioaddr);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
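
The stmmac hunks collapse four single-bit helpers into one enable/disable pair that flips TE and RE together in a single read-modify-write of the MAC control register, halving the MMIO traffic on every start/stop path. The consolidated form (MAC_RNABLE_RX keeps the driver's existing spelling of the receive-enable bit):

    static inline void sketch_enable_mac(void __iomem *ioaddr)
    {
            u32 value = readl(ioaddr + MAC_CTRL_REG);

            /* Set transmit-enable and receive-enable in one access. */
            value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
            writel(value, ioaddr + MAC_CTRL_REG);
    }

The one behavioral wrinkle is in the suspend path: where the old code always disabled TX and conditionally disabled RX, the new code leaves the whole MAC enabled when PMT wake-up is programmed, as the @@ -1839 hunk shows.
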
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 852e917..30ccbb6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9948,16 +9948,16 @@
!((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
+ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
spin_lock_bh(&tp->lock);
- if (wol->wolopts & WAKE_MAGIC) {
+ if (device_may_wakeup(dp))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, true);
- } else {
+ else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, false);
- }
spin_unlock_bh(&tp->lock);
+
return 0;
}
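
Both the tg3 hunk above and the earlier r8169 one move device_set_wakeup_enable() outside the driver's spinlocked region; the call may sleep (it reaches into the PM wakeup framework), which is forbidden under spin_lock_bh()/spin_lock_irq(). The safe ordering, sketched from the tg3 change:

    /* Sleeping work first, outside the lock ... */
    device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

    /* ... then take the lock only for the driver's own flag update. */
    spin_lock_bh(&tp->lock);
    if (device_may_wakeup(dp))
            tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
    else
            tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
    spin_unlock_bh(&tp->lock);
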
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 663b886..7930203 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1220,7 +1220,7 @@
tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
- udelay(time);
+ mdelay(time / 1000);
#endif
}
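
udelay() is a busy-wait intended for microsecond-scale delays; large arguments can overflow its internal arithmetic on some architectures, so a value already known to be in the millisecond range belongs in mdelay(). The hunk's conversion, generalized into a small helper (a sketch):

    static void sketch_busy_wait_us(unsigned int us)
    {
            if (us >= 1000)
                    mdelay(us / 1000);      /* millisecond-scale busy wait */
            else
                    udelay(us);             /* short delays stay in udelay() */
    }
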
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb..c78a505 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
- netif_stop_queue(dev);
/* wake up device, assign resources */
rc = pci_enable_device(pdev);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cc6713..5b83c3f 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -24,10 +24,6 @@
3XP Processor. It has been tested on x86 and sparc64.
KNOWN ISSUES:
- *) The current firmware always strips the VLAN tag off, even if
- we tell it not to. You should filter VLANs at the switch
- as a workaround (good practice in any event) until we can
- get this fixed.
*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
issue. Hopefully 3Com will fix it.
*) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@
struct pci_dev * pdev;
struct net_device * dev;
struct napi_struct napi;
- spinlock_t state_lock;
- struct vlan_group * vlgrp;
struct basic_ring rxHiRing;
struct basic_ring rxBuffRing;
struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@
return err;
}
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct typhoon *tp = netdev_priv(dev);
- struct cmd_desc xp_cmd;
- int err;
-
- spin_lock_bh(&tp->state_lock);
- if(!tp->vlgrp != !grp) {
- /* We've either been turned on for the first time, or we've
- * been turned off. Update the 3XP.
- */
- if(grp)
- tp->offload |= TYPHOON_OFFLOAD_VLAN;
- else
- tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
- /* If the interface is up, the runtime is running -- and we
- * must be up for the vlan core to call us.
- *
- * Do the command outside of the spin lock, as it is slow.
- */
- INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
- TYPHOON_CMD_SET_OFFLOAD_TASKS);
- xp_cmd.parm2 = tp->offload;
- xp_cmd.parm3 = tp->offload;
- spin_unlock_bh(&tp->state_lock);
- err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
- netdev_err(tp->dev, "vlan offload error %d\n", -err);
- spin_lock_bh(&tp->state_lock);
- }
-
- /* now make the change visible */
- tp->vlgrp = grp;
- spin_unlock_bh(&tp->state_lock);
-}
-
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
u32 ring_dma)
@@ -818,7 +774,7 @@
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
- cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+ cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
@@ -936,7 +892,7 @@
filter |= TYPHOON_RX_FILTER_MCAST_HASH;
}
- INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = filter;
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
@@ -1198,6 +1154,20 @@
return 1;
}
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+ /* There's no way to turn off the RX VLAN offloading and stripping
+ * on the current 3XP firmware -- it does not respect the offload
+ * settings -- so we only allow the user to toggle the TX processing.
+ */
+ if (!(data & ETH_FLAG_RXVLAN))
+ return -EINVAL;
+
+ return ethtool_op_set_flags(dev, data,
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
@@ -1224,6 +1194,8 @@
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_ringparam = typhoon_get_ringparam,
+ .set_flags = typhoon_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int
@@ -1309,9 +1281,9 @@
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+ tp->offload |= TYPHOON_OFFLOAD_VLAN;
spin_lock_init(&tp->command_lock);
- spin_lock_init(&tp->state_lock);
/* Force the writes to the shared memory area out before continuing. */
wmb();
@@ -1328,7 +1300,7 @@
tp->rxHiRing.lastWrite = 0;
tp->rxBuffRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
- tp->cmdRing.lastWrite = 0;
+ tp->respRing.lastWrite = 0;
tp->txLoRing.lastRead = 0;
tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@
} else
skb_checksum_none_assert(new_skb);
- spin_lock(&tp->state_lock);
- if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
- vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
- ntohl(rx->vlanTag) & 0xffff);
- else
- netif_receive_skb(new_skb);
- spin_unlock(&tp->state_lock);
+ if (rx->rxStatus & TYPHOON_RX_VLAN)
+ __vlan_hwaccel_put_tag(new_skb,
+ ntohl(rx->vlanTag) & 0xffff);
+ netif_receive_skb(new_skb);
received++;
budget--;
@@ -1989,11 +1958,9 @@
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
- spin_lock_bh(&tp->state_lock);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- spin_unlock_bh(&tp->state_lock);
if(err < 0)
goto error_out;
@@ -2231,13 +2198,9 @@
if(!netif_running(dev))
return 0;
- spin_lock_bh(&tp->state_lock);
- if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
- spin_unlock_bh(&tp->state_lock);
- netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
- return -EBUSY;
- }
- spin_unlock_bh(&tp->state_lock);
+ /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
@@ -2338,7 +2301,6 @@
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = typhoon_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
- .ndo_vlan_rx_register = typhoon_vlan_rx_register,
};
static int __devinit
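
With the vlgrp bookkeeping deleted, typhoon's receive path hands the hardware-stripped tag straight to the stack: __vlan_hwaccel_put_tag() records the TCI in the skb and netif_receive_skb() takes it from there, so no driver lock is needed around delivery. The receive-side pattern from the @@ -1762 hunk:

    if (rx->rxStatus & TYPHOON_RX_VLAN)
            __vlan_hwaccel_put_tag(new_skb, ntohl(rx->vlanTag) & 0xffff);
    netif_receive_skb(new_skb);
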
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9d..c04d49e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -1273,6 +1274,16 @@
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+ /* usbnet has already taken USB runtime PM, so we have to enable the
+ * feature for the USB interface; otherwise usb_autopm_get_interface
+ * may fail if USB_SUSPEND (RUNTIME_PM) is enabled.
+ */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb..969c751 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@
/* features */
enum {
- UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
- UPT1_F_RSS = 0x0002,
- UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
- UPT1_F_LRO = 0x0008,
+ UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
+ UPT1_F_RSS = cpu_to_le64(0x0002),
+ UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
+ UPT1_F_LRO = cpu_to_le64(0x0008),
};
#endif
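
Pre-swapping the UPT1_F_* bits with cpu_to_le64() at definition time is what lets the rest of the series drop the set_flag_le64()/reset_flag_le64() helpers: the constants are already in device (little-endian) byte order, so plain |= and &= ~ on the shared __le64 field are correct on any host. A sketch of the resulting usage, assuming an __le64 uptFeatures field as in the vmxnet3 shared-memory layout:

    struct sketch_dev_read {
            __le64 uptFeatures;     /* shared with the device, little-endian */
    };

    static void sketch_toggle_lro(struct sketch_dev_read *dr, bool on)
    {
            if (on)
                    dr->uptFeatures |= UPT1_F_LRO;   /* constant pre-swapped */
            else
                    dr->uptFeatures &= ~UPT1_F_LRO;  /* ~ stays in LE space */
    }
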
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b..4d84912 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
-#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
- * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
+ * filters */
struct Vmxnet3_PM_PktFilter {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f60e0e..21314e0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -873,7 +873,7 @@
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
- ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
+ ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
@@ -1563,8 +1563,7 @@
adapter->vlan_grp = grp;
/* update FEATURES to device */
- set_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
/*
@@ -1587,7 +1586,7 @@
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = NULL;
- if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+ if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
int i;
for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* update FEATURES to device */
- reset_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
@@ -1762,15 +1760,15 @@
/* set up feature flags */
if (adapter->rxcsum)
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+ devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->lro) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+ devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
adapter->vlan_grp) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
}
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
@@ -2619,13 +2617,13 @@
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
@@ -2667,7 +2665,7 @@
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+ adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
netif_device_attach(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e4b5a8..b79070b 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,13 +50,11 @@
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
- set_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
else
- reset_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@
/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=
- cpu_to_le64(UPT1_F_LRO);
+ UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
- cpu_to_le64(~UPT1_F_LRO);
+ ~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c88ea5c..edf2288 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -301,8 +301,8 @@
struct net_device *netdev;
struct pci_dev *pdev;
- u8 *hw_addr0; /* for BAR 0 */
- u8 *hw_addr1; /* for BAR 1 */
+ u8 __iomem *hw_addr0; /* for BAR 0 */
+ u8 __iomem *hw_addr1; /* for BAR 1 */
/* feature control */
bool rxcsum;
@@ -330,14 +330,14 @@
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
+ writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
+ readl((adapter)->hw_addr0 + (reg))
#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
+ writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
+ readl((adapter)->hw_addr1 + (reg))
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,21 +353,6 @@
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
- *data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
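
Dropping the cpu_to_le32()/le32_to_cpu() wrappers from the BAR accessors is a correctness fix rather than a cleanup: readl() and writel() are defined to perform little-endian MMIO themselves, so the extra swap double-converted register values on big-endian hosts. The corrected macros reduce to bare readl/writel, as in the hunk:

    #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
            writel((val), (adapter)->hw_addr0 + (reg))  /* writel is LE-aware */
    #define VMXNET3_READ_BAR0_REG(adapter, reg) \
            readl((adapter)->hw_addr0 + (reg))          /* readl returns CPU order */
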
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db59..906a3ca3 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -20,6 +20,179 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
+static enum vxge_hw_status
+__vxge_hw_fifo_create(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ struct vxge_hw_fifo_attr *attr);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_abort(
+ struct __vxge_hw_fifo *fifoh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_reset(
+ struct __vxge_hw_fifo *ringh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
+ u32 size);
+
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool_entry *entry);
+
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle);
+
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max);
+
+static void
+__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
+
+static void *
+__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+static void
+__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
+ void *memblock,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+
+static struct __vxge_hw_channel*
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type, u32 length,
+ u32 per_dtr_space, void *userdata);
+
+static void
+__vxge_hw_channel_free(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_reset(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+
+static void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+ u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(
+ void __iomem *reg,
+ u64 mask, u32 max_millis);
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+ u64 mask, u32 max_millis)
+{
+ __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+ wmb();
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+ wmb();
+
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static struct vxge_hw_mempool*
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
+ u32 item_size, u32 private_size, u32 items_initial,
+ u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata);
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+static u64
+__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+static u32
+__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates required memory for the channel and various arrays
@@ -190,7 +363,7 @@
* Will poll certain register for specified amount of time.
* Will poll until masked bit is not cleared.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
@@ -221,7 +394,7 @@
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
enum vxge_hw_status status;
@@ -236,7 +409,7 @@
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
*/
-struct vxge_hw_toc_reg __iomem *
+static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
u64 val64;
@@ -779,7 +952,7 @@
* vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
* Get the Statistics on aggregate port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
@@ -814,7 +987,7 @@
* vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
* Get the Statistics on port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_port_stats *port_stats)
{
@@ -952,20 +1125,6 @@
return 0;
#endif
}
-/*
- * vxge_hw_device_debug_mask_get - Get the debug mask
- * This routine returns the current debug mask set
- */
-u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return 0;
- return hldev->debug_module_mask;
-#else
- return 0;
-#endif
-}
/*
 * vxge_hw_getpause_data - Pause frame generation and reception.
@@ -1090,7 +1249,7 @@
* first block
* Returns the dma address of the first RxD block
*/
-u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
struct vxge_hw_mempool_dma *dma_object;
@@ -1252,7 +1411,7 @@
* This function creates Ring and initializes it.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_attr *attr)
{
@@ -1363,7 +1522,7 @@
* __vxge_hw_ring_abort - Returns the RxD
* This function terminates the RxDs of ring
*/
-enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
void *rxdh;
struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@
* __vxge_hw_ring_reset - Resets the ring
* This function resets the ring during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@
* __vxge_hw_ring_delete - Removes the ring
* This function freeup the memory pool and removes the ring
*/
-enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_ring *ring = vp->vpath->ringh;
@@ -1438,7 +1597,7 @@
* __vxge_hw_mempool_grow
* Will resize mempool up to %num_allocate value.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
u32 *num_allocated)
{
@@ -1527,7 +1686,7 @@
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
-struct vxge_hw_mempool*
+static struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
@@ -1644,7 +1803,7 @@
/*
* vxge_hw_mempool_destroy
*/
-void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
u32 i, j;
struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@
* __vxge_hw_device_vpath_config_check - Check vpath configuration.
* Check the vpath configuration
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
u64 val64;
@@ -1977,7 +2136,7 @@
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@
* __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2021,28 +2180,6 @@
}
/*
- * vxge_hw_mgmt_device_config - Retrieve device configuration.
- * Get device configuration. Permits to retrieve at run-time configuration
- * values that were used to initialize and configure the device.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_config *dev_config, int size)
-{
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
- return VXGE_HW_ERR_INVALID_DEVICE;
-
- if (size != sizeof(struct vxge_hw_device_config))
- return VXGE_HW_ERR_VERSION_CONFLICT;
-
- memcpy(dev_config, &hldev->config,
- sizeof(struct vxge_hw_device_config));
-
- return VXGE_HW_OK;
-}
-
-/*
* vxge_hw_mgmt_reg_read - Read Titan register.
*/
enum vxge_hw_status
@@ -2438,7 +2575,7 @@
* __vxge_hw_fifo_abort - Returns the TxD
* This function terminates the TxDs of fifo
*/
-enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
void *txdlh;
@@ -2466,7 +2603,7 @@
* __vxge_hw_fifo_reset - Resets the fifo
* This function resets the fifo during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -2501,7 +2638,7 @@
* in pci config space.
* Read from the vpath pci config space.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0, u32 offset, u32 *val)
{
@@ -2542,7 +2679,7 @@
* __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
* Returns the function number of the vpath.
*/
-u32
+static u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
@@ -2573,7 +2710,7 @@
* __vxge_hw_vpath_card_info_get - Get the serial numbers,
* part number and product description.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@
* __vxge_hw_vpath_fw_ver_get - Get the fw version
* Returns FW Version
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@
* __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
* Returns pci function mode
*/
-u64
+static u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@
* __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
* from MAC address table.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@
* This routine checks the vpath_rst_in_prog register to see if
* adapter completed the reset process for the vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@
* __vxge_hw_vpath_reset
* This routine resets the vpath on the device
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3383,7 +3520,7 @@
* __vxge_hw_vpath_sw_reset
* This routine resets the vpath structures
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@
* This routine configures the prc registers of virtual path using the config
* passed
*/
-void
+static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3480,7 +3617,7 @@
* This routine configures the kdfc registers of virtual path using the
* config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3553,7 +3690,7 @@
* __vxge_hw_vpath_mac_configure
* This routine configures the mac of virtual path using the config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3621,7 +3758,7 @@
* This routine configures the tim registers of virtual path using the config
* passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3897,7 +4034,7 @@
* This routine is the final phase of init which initializes the
* registers of the vpath using the configuration passed.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3966,7 +4103,7 @@
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
struct vxge_hw_vp_config *config)
{
@@ -4022,7 +4159,7 @@
* __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all the channels it opened and frees up memory
*/
-void
+static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@
* Enable the DMA vpath statistics. The function is to be called to re-enable
* the adapter to update stats into the host memory
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@
* __vxge_hw_vpath_stats_access - Get the statistics from the given location
* and offset and perform an operation
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat)
{
@@ -4445,7 +4582,7 @@
/*
* __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@
/*
* __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@
/*
* __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
*/
-enum vxge_hw_status __vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@
return status;
}
+
+static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
+ unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
+
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
+
+ vaddr = kmalloc((size), flags);
+
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
+
/*
* __vxge_hw_blockpool_create - Create block pool
*/
@@ -4845,12 +5008,11 @@
* vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
* Adds a block to block pool
*/
-void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
{
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 1a94343..5c00861 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -183,11 +183,6 @@
char version[VXGE_HW_FW_STRLEN];
};
-u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
/**
* struct vxge_hw_fifo_config - Configuration of fifo.
* @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@
u8 hash_type_ipv6ex_en;
};
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
void vxge_hw_device_debug_set(
struct __vxge_hw_device *devh,
enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@
u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
/**
* vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
* @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@
struct vxge_hw_fifo_attr fifo_attr;
};
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max);
-
-void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
-
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
- u32 size);
-
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool_entry *entry);
-
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
- void *memblock,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
- struct vxge_hw_device_config *dev_config, int size);
-
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
-enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
@@ -1954,38 +1893,6 @@
return vaddr;
}
-extern void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle);
-
-static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
- unsigned long size)
-{
- gfp_t flags;
- void *vaddr;
-
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
-
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -2010,40 +1917,6 @@
(*memblock_item_idx) * mempool->items_priv_size;
}
-enum vxge_hw_status
-__vxge_hw_mempool_grow(
- struct vxge_hw_mempool *mempool,
- u32 num_allocate,
- u32 *num_allocated);
-
-struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
- struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 private_size,
- u32 items_initial,
- u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
-
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
-
-void
-__vxge_hw_channel_free(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
-
/*
* __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
* for the fifo.
@@ -2065,9 +1938,6 @@
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle);
-enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
-
enum vxge_hw_status vxge_hw_vpath_close(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2089,54 +1959,9 @@
struct __vxge_hw_vpath_handle *vpath_handle,
u32 new_mtu);
-enum vxge_hw_status vxge_hw_vpath_stats_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_access(
- struct __vxge_hw_virtualpath *vpath,
- u32 operation,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
-
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_device_register_poll(
- void __iomem *reg,
- u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@
writel(val, addr);
}
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- status = __vxge_hw_device_register_poll(addr, mask, max_millis);
- return status;
-}
-
-struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0);
-
-enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_vpath_pci_read(
- struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0,
- u32 offset,
- u32 *val);
-
-enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
-
-u32
-__vxge_hw_vpath_func_id_get(
- u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
-enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
/**
* vxge_debug
* @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 05679e3..b67746e 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1142,7 +1142,7 @@
.get_ethtool_stats = vxge_get_ethtool_stats,
};
-void initialize_ethtool_ops(struct net_device *ndev)
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a69542e..813829f 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -82,6 +82,16 @@
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@
* This function is called during interrupt context to notify link up state
* change.
*/
-void
+static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@
* This function is called during interrupt context to notify link down state
* change.
*/
-void
+static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@
* If the interrupt is because of a received frame or if the receive ring
* contains fresh as yet un-processed frames, this function is called.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
@@ -531,7 +541,7 @@
* freed and frees all skbs whose data have already DMA'ed into the NICs
* internal memory.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@
*
* Enables the interrupts for the vpath
*/
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id = 0;
@@ -1279,7 +1289,7 @@
*
* Disables the interrupts for the vpath
*/
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id;
@@ -1553,7 +1563,7 @@
*
* driver may reset the chip on events of serr, eccerr, etc
*/
-int vxge_reset(struct vxgedev *vdev)
+static int vxge_reset(struct vxgedev *vdev)
{
return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
@@ -1724,7 +1734,7 @@
return status;
}
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@
}
/* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@
return status;
}
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
@@ -1807,7 +1818,8 @@
return FALSE;
}
/* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@
}
/* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@
}
/* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@@ -1916,7 +1928,7 @@
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@
}
/* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
struct vxge_vpath *vpath;
int i;
@@ -1966,7 +1978,7 @@
}
/* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
{
struct vxge_hw_vpath_attr attr;
enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@
}
/* Loop through the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@
}
}
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@
#endif
};
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev_out)
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+ struct vxge_config *config,
+ int high_dma, int no_of_vpath,
+ struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- initialize_ethtool_ops(ndev);
+ vxge_initialize_ethtool_ops(ndev);
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@
*
* This function will unregister and free network device
*/
-void
+static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index d4be07e..de64536 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -396,64 +396,7 @@
mod_timer(&timer, (jiffies + exp)); \
} while (0);
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
- struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f..4bdb611 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,6 +17,13 @@
#include "vxge-config.h"
#include "vxge-main.h"
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+ u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms);
+
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -513,7 +520,7 @@
* Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -538,7 +545,7 @@
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -564,7 +571,7 @@
*
* Handle error.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
@@ -646,7 +653,7 @@
* it swaps the reserve and free arrays.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
@@ -692,7 +699,8 @@
* Posts a dtr to work array.
*
*/
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+ void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -1658,37 +1666,6 @@
}
/**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 * from the vlan id table.
* @vp: Vpath handle.
@@ -1898,9 +1875,9 @@
* Process vpath alarms.
*
*/
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
@@ -2265,36 +2242,6 @@
}
/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- if (hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clr_msix_one_shot_vec[msix_id%4]);
- } else {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clear_msix_mask_vect[msix_id%4]);
- }
-}
-
-/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
@@ -2316,22 +2263,6 @@
}
/**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
- &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
-/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.
*
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d1..9890d4d 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1749,14 +1749,6 @@
u64 *stat);
enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats);
-
-enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,10 @@
#endif
};
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
/* ========================= FIFO PRIVATE API ============================= */
struct vxge_hw_fifo_attr;
-enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@
u32 is_last);
};
-void
-__vxge_hw_mempool_destroy(
- struct vxge_hw_mempool *mempool);
-
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
@@ -2195,61 +2144,10 @@
u64 data2);
enum vxge_hw_status
-__vxge_hw_vpath_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
__vxge_hw_vpath_enable(
struct __vxge_hw_device *devh,
u32 vp_id);
-void
-__vxge_hw_vpath_prc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id,
- struct vxge_hw_vp_config *config);
-
-void
-__vxge_hw_vp_terminate(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms);
-
void vxge_hw_device_intr_enable(
struct __vxge_hw_device *devh);
@@ -2321,11 +2219,6 @@
u64 *vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
@@ -2387,16 +2280,9 @@
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
enum vxge_hw_status vxge_hw_vpath_intr_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2415,12 +2301,6 @@
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2436,18 +2316,4 @@
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
- struct __vxge_hw_device *hldev,
- u32 vp_id,
- enum vxge_hw_event type);
-
#endif
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6be43eb..f47a714 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -440,7 +440,6 @@
* index of buffer to be filled by driver; state EMPTY or PACKING
*/
int next_buf_to_fill;
- int sync_iqdio_error;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -695,14 +694,6 @@
int is_vmac;
};
-struct qeth_skb_data {
- __u32 magic;
- int count;
-};
-
-#define QETH_SKB_MAGIC 0x71657468
-#define QETH_SIGA_CC2_RETRIES 3
-
struct qeth_rx {
int b_count;
int b_index;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7642670..e6b2df0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -877,8 +877,8 @@
return;
}
-static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf)
{
int i;
struct sk_buff *skb;
@@ -887,13 +887,11 @@
if (buf->buffer->element[0].flags & 0x40)
atomic_dec(&queue->set_pci_flags_count);
- if (!qeth_skip_skb) {
+ skb = skb_dequeue(&buf->skb_list);
+ while (skb) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
- while (skb) {
- atomic_dec(&skb->users);
- dev_kfree_skb_any(skb);
- skb = skb_dequeue(&buf->skb_list);
- }
}
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -909,12 +907,6 @@
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf)
-{
- __qeth_clear_output_buffer(queue, buf, 0);
-}
-
void qeth_clear_qdio_buffers(struct qeth_card *card)
{
int i, j;
@@ -2833,7 +2825,6 @@
}
}
- queue->sync_iqdio_error = 0;
queue->card->dev->trans_start = jiffies;
if (queue->card->options.performance_stats) {
queue->card->perf_stats.outbound_do_qdio_cnt++;
@@ -2849,10 +2840,6 @@
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
- if (rc > 0) {
- if (!(rc & QDIO_ERROR_SIGA_BUSY))
- queue->sync_iqdio_error = rc & 3;
- }
if (rc) {
queue->card->stats.tx_errors += count;
/* ignore temporary SIGA errors without busy condition */
@@ -2916,7 +2903,7 @@
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev)
+ if (card->dev && (card->dev->flags & IFF_UP))
napi_schedule(&card->napi);
}
EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
@@ -2940,7 +2927,6 @@
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct qeth_qdio_out_buffer *buffer;
int i;
- unsigned qeth_send_err;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
@@ -2956,9 +2942,8 @@
}
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
- __qeth_clear_output_buffer(queue, buffer,
- (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
+ qeth_handle_send_error(card, buffer, qdio_error);
+ qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
@@ -3183,10 +3168,7 @@
int offset, int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
- struct sk_buff *skb1;
- struct qeth_skb_data *retry_ctrl;
int index;
- int rc;
/* spin until we get the queue ... */
while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
@@ -3205,25 +3187,6 @@
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
qeth_flush_buffers(queue, index, 1);
- if (queue->sync_iqdio_error == 2) {
- skb1 = skb_dequeue(&buffer->skb_list);
- while (skb1) {
- atomic_dec(&skb1->users);
- skb1 = skb_dequeue(&buffer->skb_list);
- }
- retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
- if (retry_ctrl->magic != QETH_SKB_MAGIC) {
- retry_ctrl->magic = QETH_SKB_MAGIC;
- retry_ctrl->count = 0;
- }
- if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
- retry_ctrl->count++;
- rc = dev_queue_xmit(skb);
- } else {
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 2, "qrdrop");
- }
- }
return 0;
out:
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 6bb876d..fbe86ca 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -797,7 +797,6 @@
* - iff DATA transfer is active, carrier is "on"
* - tx queueing enabled if open *and* carrier is "on"
*/
- netif_stop_queue(net);
netif_carrier_off(net);
dev->gadget = g;
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 3a779ff..7e8ca75 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -88,12 +88,6 @@
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
- /* Sent to kevent to create cn_queue only when needed */
- struct work_struct wq_creation;
- /* Tell if the wq_creation job is pending/completed */
- atomic_t wq_requested;
- /* Wait for cn_queue to be created */
- wait_queue_head_t wq_created;
struct list_head queue_list;
spinlock_t queue_lock;
@@ -141,8 +135,6 @@
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
-
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 7187bd8..749f01c 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -462,7 +462,8 @@
* @dccps_hc_rx_insert_options - receiver wants to add options when acking
* @dccps_hc_tx_insert_options - sender wants to add options when sending
* @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_xmit_timer - timer for when CCID is not ready to send
+ * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
+ * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
* @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
*/
struct dccp_sock {
@@ -502,6 +503,7 @@
__u8 dccps_hc_rx_insert_options:1;
__u8 dccps_hc_tx_insert_options:1;
__u8 dccps_server_timewait:1;
+ struct tasklet_struct dccps_xmitlet;
struct timer_list dccps_xmit_timer;
};
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index d0f0801..1ff81b5 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -12,7 +12,7 @@
#define MARVELL_PHY_ID_88E1121R 0x01410cb0
#define MARVELL_PHY_ID_88E1145 0x01410cd0
#define MARVELL_PHY_ID_88E1240 0x01410e30
-#define MARVELL_PHY_ID_88EC048 0x01410e90
+#define MARVELL_PHY_ID_88E1318S 0x01410e90
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fcd3dda..d8fd2c2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -585,15 +585,15 @@
table->ents[hash & table->mask] = RPS_NO_CPU;
}
-extern struct rps_sock_flow_table *rps_sock_flow_table;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
- struct rps_map *rps_map;
- struct rps_dev_flow_table *rps_flow_table;
- struct kobject kobj;
- struct netdev_rx_queue *first;
- atomic_t count;
+ struct rps_map __rcu *rps_map;
+ struct rps_dev_flow_table __rcu *rps_flow_table;
+ struct kobject kobj;
+ struct netdev_rx_queue *first;
+ atomic_t count;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
@@ -944,7 +944,7 @@
/* Protocol specific pointers */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- struct vlan_group *vlgrp; /* VLAN group */
+ struct vlan_group __rcu *vlgrp; /* VLAN group */
#endif
#ifdef CONFIG_NET_DSA
void *dsa_ptr; /* dsa specific data */
@@ -952,7 +952,7 @@
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
- void *ip6_ptr; /* IPv6 specific data */
+ struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
void *ec_ptr; /* Econet specific data */
void *ax25_ptr; /* AX.25 specific data */
struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
@@ -1072,7 +1072,7 @@
struct pcpu_dstats __percpu *dstats; /* dummy stats */
};
/* GARP */
- struct garp_port *garp_port;
+ struct garp_port __rcu *garp_port;
/* class/net/name entry */
struct device dev;
@@ -1554,6 +1554,11 @@
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
+ if (WARN_ON(!dev_queue)) {
+ printk(KERN_INFO "netif_stop_queue() cannot be called before "
+ "register_netdev()");
+ return;
+ }
set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a6e047a..7da5fa8 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -472,11 +472,7 @@
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_device_register(struct phy_device *phy);
-int phy_clear_interrupt(struct phy_device *phydev);
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
int phy_init_hw(struct phy_device *phydev);
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
const char *bus_id, u32 flags, phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -492,17 +488,12 @@
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
-void phy_sanitize_settings(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
-int phy_enable_interrupts(struct phy_device *phydev);
-int phy_disable_interrupts(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev) {
return phydev->drv->read_status(phydev);
}
-int genphy_config_advert(struct phy_device *phydev);
-int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@@ -511,8 +502,6 @@
int genphy_resume(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
-void phy_prepare_link(struct phy_device *phydev,
- void (*adjust_link)(struct net_device *));
void phy_state_machine(struct work_struct *work);
void phy_start_machine(struct phy_device *phydev,
void (*handler)(struct net_device *));
@@ -523,7 +512,6 @@
struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
void phy_device_free(struct phy_device *phydev);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5146b50..86b652f 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -322,7 +322,7 @@
int offset,
unsigned int len, __wsum *csump);
-extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 6da573c..8eff83b 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -28,7 +28,7 @@
* @sockaddr: Socket address to connect.
* @priority: Priority of the connection.
* @link_selector: Link selector (high bandwidth or low latency)
- * @link_name: Name of the CAIF Link Layer to use.
+ * @ifindex: kernel index of the interface.
* @param: Connect Request parameters (CAIF_SO_REQ_PARAM).
*
* This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@
struct sockaddr_caif sockaddr;
enum caif_channel_priority priority;
enum caif_link_selector link_selector;
- char link_name[16];
+ int ifindex;
struct caif_param param;
};
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
new file mode 100644
index 0000000..5bcce55
--- /dev/null
+++ b/include/net/caif/caif_shm.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SHM_H_
+#define CAIF_SHM_H_
+
+struct shmdev_layer {
+ u32 shm_base_addr;
+ u32 shm_total_sz;
+ u32 shm_id;
+ u32 shm_loopback;
+ void *hmbx;
+ int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
+ int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
+ struct shmdev_layer *pshm_dev, void *pshm_drv);
+ struct net_device *pshm_netdev;
+};
+
+extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
+extern void caif_shmcore_remove(struct net_device *pshm_netdev);
+
+#endif
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index ce4570d..87c3d11 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -121,6 +121,8 @@
wait_queue_head_t wait;
spinlock_t lock;
bool flow_stop;
+ bool slave;
+ bool slave_talked;
#ifdef CONFIG_DEBUG_FS
enum cfspi_state dbg_state;
u16 pcmd;
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index bd646fa..f688478 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -139,10 +139,10 @@
enum cfcnfg_phy_preference phy_pref);
/**
- * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
+ * cfcnfg_get_id_from_ifi() - Get the physical identifier for an ifindex;
+ * it matches the CAIF physical id with the kernel interface index.
* @cnfg: Configuration object
- * @name: Name of the Physical Layer (Caif Link Layer)
+ * @ifi: ifindex obtained from socket.c bindtodevice.
*/
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
-
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
#endif /* CFCNFG_H_ */
diff --git a/include/net/dn.h b/include/net/dn.h
index e5469f7..a514a3c 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -225,7 +225,7 @@
extern int decnet_dr_count;
extern int decnet_no_fc_max_cwnd;
-extern int sysctl_decnet_mem[3];
+extern long sysctl_decnet_mem[3];
extern int sysctl_decnet_wmem[3];
extern int sysctl_decnet_rmem[3];
diff --git a/include/net/dst.h b/include/net/dst.h
index a217c83..ffe9cb7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -95,7 +95,7 @@
unsigned long lastuse;
union {
struct dst_entry *next;
- struct rtable *rt_next;
+ struct rtable __rcu *rt_next;
struct rt6_info *rt6_next;
struct dn_route *dn_next;
};
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 1fa5306..51665b3 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -2,6 +2,7 @@
#define _NET_DST_OPS_H
#include <linux/types.h>
#include <linux/percpu_counter.h>
+#include <linux/cache.h>
struct dst_entry;
struct kmem_cachep;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 106f309..075f1e3 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -20,7 +20,7 @@
u32 table;
u8 action;
u32 target;
- struct fib_rule * ctarget;
+ struct fib_rule __rcu *ctarget;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct rcu_head rcu;
diff --git a/include/net/garp.h b/include/net/garp.h
index 825f172..f4c2959 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -107,7 +107,7 @@
};
struct garp_port {
- struct garp_applicant *applicants[GARP_APPLICATION_MAX + 1];
+ struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1];
};
extern int garp_register_application(struct garp_application *app);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 417d0c8..fe239bf 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,7 +15,7 @@
struct inet_peer {
/* group together avl_left,avl_right,v4daddr to speedup lookups */
- struct inet_peer *avl_left, *avl_right;
+ struct inet_peer __rcu *avl_left, *avl_right;
__be32 v4daddr; /* peer's address */
__u32 avl_height;
struct list_head unused;
diff --git a/include/net/ip.h b/include/net/ip.h
index dbee3fe..86e2b18 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -59,7 +59,7 @@
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
struct ip_ra_chain {
- struct ip_ra_chain *next;
+ struct ip_ra_chain __rcu *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
@@ -68,7 +68,7 @@
struct rcu_head rcu;
};
-extern struct ip_ra_chain *ip_ra_chain;
+extern struct ip_ra_chain __rcu *ip_ra_chain;
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fc94ec5..fc73e667 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -13,7 +13,7 @@
/* IPv6 tunnel */
struct ip6_tnl {
- struct ip6_tnl *next; /* next tunnel in list */
+ struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
struct ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index ba3666d..07bdb5e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -158,6 +158,8 @@
extern void fib_table_select_default(struct fib_table *table,
const struct flowi *flp,
struct fib_result *res);
+extern void fib_free_table(struct fib_table *tb);
+
#ifndef CONFIG_IP_MULTIPLE_TABLES
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 58abbf9..a32654d 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -16,7 +16,7 @@
};
struct ip_tunnel {
- struct ip_tunnel *next;
+ struct ip_tunnel __rcu *next;
struct net_device *dev;
int err_count; /* Number of arrived ICMP errors */
@@ -34,12 +34,12 @@
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
#endif
- struct ip_tunnel_prl_entry *prl; /* potential router list */
+ struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
unsigned int prl_count; /* # of entries in PRL */
};
struct ip_tunnel_prl_entry {
- struct ip_tunnel_prl_entry *next;
+ struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
u16 flags;
struct rcu_head rcu_head;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 65af9a0..1bf812b 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -88,7 +88,7 @@
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
#endif
- struct net_generic *gen;
+ struct net_generic __rcu *gen;
/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f3b201d..9801c55 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -384,7 +384,7 @@
*
* Returns the first attribute which matches the specified type.
*/
-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
int hdrlen, int attrtype)
{
return nla_find(nlmsg_attrdata(nlh, hdrlen),
diff --git a/include/net/protocol.h b/include/net/protocol.h
index f1effdd3c..dc07495 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -89,10 +89,10 @@
#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
-extern const struct net_protocol *inet_protos[MAX_INET_PROTOS];
+extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
+extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
diff --git a/include/net/sock.h b/include/net/sock.h
index 73a4f97..a6338d0 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -301,7 +301,7 @@
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
- struct sk_filter *sk_filter;
+ struct sk_filter __rcu *sk_filter;
void *sk_protinfo;
struct timer_list sk_timer;
ktime_t sk_stamp;
@@ -762,7 +762,7 @@
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
- atomic_t *memory_allocated; /* Current allocated memory. */
+ atomic_long_t *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
/*
* Pressure flag: try to collapse.
@@ -771,7 +771,7 @@
* is strict, actions are advisory and have some latency.
*/
int *memory_pressure;
- int *sysctl_mem;
+ long *sysctl_mem;
int *sysctl_wmem;
int *sysctl_rmem;
int max_header;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4fee042..e36c874 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -224,7 +224,7 @@
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
-extern int sysctl_tcp_mem[3];
+extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
@@ -247,7 +247,7 @@
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
-extern atomic_t tcp_memory_allocated;
+extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
@@ -280,7 +280,7 @@
}
if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+ atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
return true;
return false;
}
diff --git a/include/net/udp.h b/include/net/udp.h
index 200b828..bb967dd 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -105,10 +105,10 @@
extern struct proto udp_prot;
-extern atomic_t udp_memory_allocated;
+extern atomic_long_t udp_memory_allocated;
/* sysctl variables for udp */
-extern int sysctl_udp_mem[3];
+extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f28d7c9..bcfb6b2 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1264,7 +1264,7 @@
int (*handler)(struct sk_buff *skb);
int (*err_handler)(struct sk_buff *skb, u32 info);
- struct xfrm_tunnel *next;
+ struct xfrm_tunnel __rcu *next;
int priority;
};
@@ -1272,7 +1272,7 @@
int (*handler)(struct sk_buff *skb);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);
- struct xfrm6_tunnel *next;
+ struct xfrm6_tunnel __rcu *next;
int priority;
};
diff --git a/net/802/garp.c b/net/802/garp.c
index 941f2a3..c1df2da 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -346,8 +346,8 @@
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@
static void garp_release_port(struct net_device *dev)
{
- struct garp_port *port = dev->garp_port;
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
unsigned int i;
for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
- if (port->applicants[i])
+ if (rtnl_dereference(port->applicants[i]))
return;
}
rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@
ASSERT_RTNL();
- if (!dev->garp_port) {
+ if (!rtnl_dereference(dev->garp_port)) {
err = garp_init_port(dev);
if (err < 0)
goto err1;
@@ -601,8 +601,8 @@
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
ASSERT_RTNL();
diff --git a/net/802/stp.c b/net/802/stp.c
index 53c8f77..978c30b 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -21,8 +21,8 @@
#define GARP_ADDR_MAX 0x2F
#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
-static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
-static const struct stp_proto *stp_proto __read_mostly;
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
static struct llc_sap *sap __read_mostly;
static unsigned int sap_registered;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 05b867e..52077ca 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -112,7 +112,7 @@
ASSERT_RTNL();
- grp = real_dev->vlgrp;
+ grp = rtnl_dereference(real_dev->vlgrp);
BUG_ON(!grp);
/* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@
struct vlan_group *grp, *ngrp = NULL;
int err;
- grp = real_dev->vlgrp;
+ grp = rtnl_dereference(real_dev->vlgrp);
if (!grp) {
ngrp = grp = vlan_group_alloc(real_dev);
if (!grp)
@@ -385,7 +385,7 @@
dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
}
- grp = dev->vlgrp;
+ grp = rtnl_dereference(dev->vlgrp);
if (!grp)
goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf..bb86d29 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@
ax25_cb *ax25;
int err = 0;
+	memset(fsa, 0, sizeof(*fsa));
lock_sock(sk);
ax25 = ax25_sk(sk);
@@ -1403,7 +1404,6 @@
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->dest_addr;
- fsa->fsa_ax25.sax25_ndigis = 0;
if (ax25->digipeat != NULL) {
ndigi = ax25->digipeat->ndigi;
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae683..d522d8c 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@
{
struct dev_info *dev_info;
enum cfcnfg_phy_preference pref;
- memset(l, 0, sizeof(*l));
- l->priority = s->priority;
+ int res;
- if (s->link_name[0] != '\0')
- l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+ memset(l, 0, sizeof(*l));
+	/* In the CAIF protocol, a low value means high priority */
+ l->priority = CAIF_PRIO_MAX - s->priority + 1;
+
+ if (s->ifindex != 0){
+ res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+ if (res < 0)
+ return res;
+ l->phyid = res;
+ }
else {
switch (s->link_selector) {
case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a..a42a408 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@
case NETDEV_UNREGISTER:
caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
netdev_info(dev, "unregister\n");
atomic_set(&caifd->state, what);
caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd..1bf0cf5 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
- int prio, linksel;
- struct ifreq ifreq;
+ int linksel;
if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
return -ENOPROTOOPT;
@@ -735,33 +734,6 @@
release_sock(&cf_sk->sk);
return 0;
- case SO_PRIORITY:
- if (lvl != SOL_SOCKET)
- goto bad_sol;
- if (ol < sizeof(int))
- return -EINVAL;
- if (copy_from_user(&prio, ov, sizeof(int)))
- return -EINVAL;
- lock_sock(&(cf_sk->sk));
- cf_sk->conn_req.priority = prio;
- release_sock(&cf_sk->sk);
- return 0;
-
- case SO_BINDTODEVICE:
- if (lvl != SOL_SOCKET)
- goto bad_sol;
- if (ol < sizeof(struct ifreq))
- return -EINVAL;
- if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
- return -EFAULT;
- lock_sock(&(cf_sk->sk));
- strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
- sizeof(cf_sk->conn_req.link_name));
- cf_sk->conn_req.link_name
- [sizeof(cf_sk->conn_req.link_name)-1] = 0;
- release_sock(&cf_sk->sk);
- return 0;
-
case CAIFSO_REQ_PARAM:
if (lvl != SOL_CAIF)
goto bad_sol;
@@ -880,6 +852,18 @@
sock->state = SS_CONNECTING;
sk->sk_state = CAIF_CONNECTING;
+	/* Check the priority value coming from the socket; */
+	/* if it is out of range it will be adjusted. */
+ if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+ cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+ else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+ cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+ else
+ cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+	/* ifindex = id of the interface. */
+ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
dbfs_atomic_inc(&cnt.num_connect_req);
cf_sk->layer.receive = caif_sktrecv_cb;
err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@
cf_sk->maxframe = mtu - (headroom + tailroom);
if (cf_sk->maxframe < 1) {
pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+ err = -ENODEV;
goto out;
}
@@ -1142,7 +1127,7 @@
set_rx_flow_on(cf_sk);
/* Set default options on configuration */
- cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+ cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
cf_sk->conn_req.protocol = protocol;
/* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd1..21ede14 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@
return NULL;
}
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
int i;
-
- /* Try to match with specified name */
- for (i = 0; i < MAX_PHY_LAYERS; i++) {
- if (cnfg->phy_layers[i].frm_layer != NULL
- && strcmp(cnfg->phy_layers[i].phy_layer->name,
- name) == 0)
- return cnfg->phy_layers[i].frm_layer->id;
- }
- return 0;
+ for (i = 0; i < MAX_PHY_LAYERS; i++)
+ if (cnfg->phy_layers[i].frm_layer != NULL &&
+ cnfg->phy_layers[i].ifindex == ifi)
+ return i;
+ return -ENODEV;
}
int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a..3cd8f97 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@
struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layr);
spin_lock(&ctrl->info_list_lock);
- pr_warn("enter\n");
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (p->client_layer == adap_layer) {
- pr_warn("cancel req :%d\n", p->sequence_no);
+ pr_debug("cancel req :%d\n", p->sequence_no);
list_del(&p->list);
kfree(p);
}
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9..11a2af4 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
@@ -38,5 +40,17 @@
static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
+ struct cfsrvl *service = container_obj(layr);
+ struct caif_payload_info *info;
+ int ret;
+
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ /* Add info for MUX-layer to route the packet out */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->dev_info = &service->dev_info;
+
return layr->dn->transmit(layr->dn, pkt);
}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481..e2fb5fa 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
{
- caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+ caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
/* Add info for MUX-layer to route the packet out. */
cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/compat.c b/net/compat.c
index 63d260e..3649d58 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -41,10 +41,12 @@
compat_size_t len;
if (get_user(len, &uiov32->iov_len) ||
- get_user(buf, &uiov32->iov_base)) {
- tot_len = -EFAULT;
- break;
- }
+ get_user(buf, &uiov32->iov_base))
+ return -EFAULT;
+
+ if (len > INT_MAX - tot_len)
+ len = INT_MAX - tot_len;
+
tot_len += len;
kiov->iov_base = compat_ptr(buf);
kiov->iov_len = (__kernel_size_t) len;
diff --git a/net/core/dev.c b/net/core/dev.c
index 78b5a89..0dd54a6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_IP_CSUM) &&
+ return ((features & NETIF_F_NO_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_IPV6_CSUM) &&
+ ((features & NETIF_F_V6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@
static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
+ __be16 protocol = skb->protocol;
int features = dev->features;
- if (vlan_tx_tag_present(skb))
+ if (vlan_tx_tag_present(skb)) {
features &= dev->vlan_features;
-
- if (can_checksum_protocol(features, skb->protocol))
- return true;
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- if (can_checksum_protocol(dev->features & dev->vlan_features,
- veh->h_vlan_encapsulated_proto))
- return true;
+ protocol = veh->h_vlan_encapsulated_proto;
+ features &= dev->vlan_features;
}
- return false;
+ return can_checksum_protocol(features, protocol);
}
/**
@@ -2135,7 +2131,7 @@
} else {
struct sock *sk = skb->sk;
queue_index = sk_tx_queue_get(sk);
- if (queue_index < 0) {
+ if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
queue_index = 0;
if (dev->real_num_tx_queues > 1)
@@ -2213,7 +2209,7 @@
}
static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
/**
* dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
/*
@@ -2425,7 +2421,7 @@
struct rps_dev_flow **rflowp)
{
struct netdev_rx_queue *rxqueue;
- struct rps_map *map = NULL;
+ struct rps_map *map;
struct rps_dev_flow_table *flow_table;
struct rps_sock_flow_table *sock_flow_table;
int cpu = -1;
@@ -2444,15 +2440,15 @@
} else
rxqueue = dev->_rx;
- if (rxqueue->rps_map) {
- map = rcu_dereference(rxqueue->rps_map);
- if (map && map->len == 1) {
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map) {
+ if (map->len == 1) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
goto done;
}
- } else if (!rxqueue->rps_flow_table) {
+ } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
goto done;
}
@@ -5416,7 +5412,7 @@
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
WARN_ON(rcu_dereference_raw(dev->ip_ptr));
- WARN_ON(dev->ip6_ptr);
+ WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
if (dev->destructor)
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628..b99c7c7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@
static struct notifier_block dst_dev_notifier = {
.notifier_call = dst_dev_event,
+ .priority = -10, /* must be called after other network notifiers */
};
void __init dst_init(void)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc3f25..82a4369 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@
list_for_each_entry(r, &ops->rules_list, list) {
if (r->pref == rule->target) {
- rule->ctarget = r;
+ RCU_INIT_POINTER(rule->ctarget, r);
break;
}
}
- if (rule->ctarget == NULL)
+ if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
unresolved = 1;
} else if (rule->action == FR_ACT_GOTO)
goto errout_free;
@@ -373,6 +373,11 @@
fib_rule_get(rule);
+ if (last)
+ list_add_rcu(&rule->list, &last->list);
+ else
+ list_add_rcu(&rule->list, &ops->rules_list);
+
if (ops->unresolved_rules) {
/*
* There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@
list_for_each_entry(r, &ops->rules_list, list) {
if (r->action == FR_ACT_GOTO &&
r->target == rule->pref) {
- BUG_ON(r->ctarget != NULL);
+ BUG_ON(rtnl_dereference(r->ctarget) != NULL);
rcu_assign_pointer(r->ctarget, rule);
if (--ops->unresolved_rules == 0)
break;
@@ -395,11 +400,6 @@
if (unresolved)
ops->unresolved_rules++;
- if (last)
- list_add_rcu(&rule->list, &last->list);
- else
- list_add_rcu(&rule->list, &ops->rules_list);
-
notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
flush_route_cache(ops);
rules_ops_put(ops);
@@ -487,7 +487,7 @@
*/
if (ops->nr_goto_rules > 0) {
list_for_each_entry(tmp, &ops->rules_list, list) {
- if (tmp->ctarget == rule) {
+ if (rtnl_dereference(tmp->ctarget) == rule) {
rcu_assign_pointer(tmp->ctarget, NULL);
ops->unresolved_rules++;
}
@@ -545,7 +545,8 @@
frh->action = rule->action;
frh->flags = rule->flags;
- if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+ if (rule->action == FR_ACT_GOTO &&
+ rcu_dereference_raw(rule->ctarget) == NULL)
frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 7adf503..23e9b2a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,8 +89,8 @@
rcu_read_lock_bh();
filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
- unsigned int pkt_len = sk_run_filter(skb, filter->insns,
- filter->len);
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
rcu_read_unlock_bh();
@@ -112,39 +112,41 @@
*/
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
- struct sock_filter *fentry; /* We walk down these */
void *ptr;
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
+ unsigned long memvalid = 0;
u32 tmp;
int k;
int pc;
+ BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
/*
* Process array of filter instructions.
*/
for (pc = 0; pc < flen; pc++) {
- fentry = &filter[pc];
+ const struct sock_filter *fentry = &filter[pc];
+ u32 f_k = fentry->k;
switch (fentry->code) {
case BPF_S_ALU_ADD_X:
A += X;
continue;
case BPF_S_ALU_ADD_K:
- A += fentry->k;
+ A += f_k;
continue;
case BPF_S_ALU_SUB_X:
A -= X;
continue;
case BPF_S_ALU_SUB_K:
- A -= fentry->k;
+ A -= f_k;
continue;
case BPF_S_ALU_MUL_X:
A *= X;
continue;
case BPF_S_ALU_MUL_K:
- A *= fentry->k;
+ A *= f_k;
continue;
case BPF_S_ALU_DIV_X:
if (X == 0)
@@ -152,49 +154,49 @@
A /= X;
continue;
case BPF_S_ALU_DIV_K:
- A /= fentry->k;
+ A /= f_k;
continue;
case BPF_S_ALU_AND_X:
A &= X;
continue;
case BPF_S_ALU_AND_K:
- A &= fentry->k;
+ A &= f_k;
continue;
case BPF_S_ALU_OR_X:
A |= X;
continue;
case BPF_S_ALU_OR_K:
- A |= fentry->k;
+ A |= f_k;
continue;
case BPF_S_ALU_LSH_X:
A <<= X;
continue;
case BPF_S_ALU_LSH_K:
- A <<= fentry->k;
+ A <<= f_k;
continue;
case BPF_S_ALU_RSH_X:
A >>= X;
continue;
case BPF_S_ALU_RSH_K:
- A >>= fentry->k;
+ A >>= f_k;
continue;
case BPF_S_ALU_NEG:
A = -A;
continue;
case BPF_S_JMP_JA:
- pc += fentry->k;
+ pc += f_k;
continue;
case BPF_S_JMP_JGT_K:
- pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A > f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGE_K:
- pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A >= f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JEQ_K:
- pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A == f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JSET_K:
- pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A & f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGT_X:
pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
case BPF_S_LD_W_ABS:
- k = fentry->k;
+ k = f_k;
load_w:
ptr = load_pointer(skb, k, 4, &tmp);
if (ptr != NULL) {
@@ -218,7 +220,7 @@
}
break;
case BPF_S_LD_H_ABS:
- k = fentry->k;
+ k = f_k;
load_h:
ptr = load_pointer(skb, k, 2, &tmp);
if (ptr != NULL) {
@@ -227,7 +229,7 @@
}
break;
case BPF_S_LD_B_ABS:
- k = fentry->k;
+ k = f_k;
load_b:
ptr = load_pointer(skb, k, 1, &tmp);
if (ptr != NULL) {
@@ -242,32 +244,34 @@
X = skb->len;
continue;
case BPF_S_LD_W_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_w;
case BPF_S_LD_H_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_h;
case BPF_S_LD_B_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_b;
case BPF_S_LDX_B_MSH:
- ptr = load_pointer(skb, fentry->k, 1, &tmp);
+ ptr = load_pointer(skb, f_k, 1, &tmp);
if (ptr != NULL) {
X = (*(u8 *)ptr & 0xf) << 2;
continue;
}
return 0;
case BPF_S_LD_IMM:
- A = fentry->k;
+ A = f_k;
continue;
case BPF_S_LDX_IMM:
- X = fentry->k;
+ X = f_k;
continue;
case BPF_S_LD_MEM:
- A = mem[fentry->k];
+ A = (memvalid & (1UL << f_k)) ?
+ mem[f_k] : 0;
continue;
case BPF_S_LDX_MEM:
- X = mem[fentry->k];
+ X = (memvalid & (1UL << f_k)) ?
+ mem[f_k] : 0;
continue;
case BPF_S_MISC_TAX:
X = A;
@@ -276,14 +280,16 @@
A = X;
continue;
case BPF_S_RET_K:
- return fentry->k;
+ return f_k;
case BPF_S_RET_A:
return A;
case BPF_S_ST:
- mem[fentry->k] = A;
+ memvalid |= 1UL << f_k;
+ mem[f_k] = A;
continue;
case BPF_S_STX:
- mem[fentry->k] = X;
+ memvalid |= 1UL << f_k;
+ mem[f_k] = X;
continue;
default:
WARN_ON(1);
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 72aceb1..c40f27e 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,10 +35,9 @@
* in any case.
*/
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, ct;
- long err;
+ int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
@@ -62,14 +61,13 @@
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
- err += iov[ct].iov_len;
- /*
- * Goal is not to verify user data, but to prevent returning
- * negative value, which is interpreted as errno.
- * Overflow is still possible, but it is harmless.
- */
- if (err < 0)
- return -EMSGSIZE;
+ size_t len = iov[ct].iov_len;
+
+ if (len > INT_MAX - err) {
+ len = INT_MAX - err;
+ iov[ct].iov_len = len;
+ }
+ err += len;
}
return err;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b143173..a5ff5a8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@
}
spin_lock(&rps_map_lock);
- old_map = queue->rps_map;
+ old_map = rcu_dereference_protected(queue->rps_map,
+ lockdep_is_held(&rps_map_lock));
rcu_assign_pointer(queue->rps_map, map);
spin_unlock(&rps_map_lock);
@@ -677,7 +678,8 @@
table = NULL;
spin_lock(&rps_dev_flow_lock);
- old_table = queue->rps_flow_table;
+ old_table = rcu_dereference_protected(queue->rps_flow_table,
+ lockdep_is_held(&rps_dev_flow_lock));
rcu_assign_pointer(queue->rps_flow_table, table);
spin_unlock(&rps_dev_flow_lock);
@@ -705,13 +707,17 @@
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
struct netdev_rx_queue *first = queue->first;
+ struct rps_map *map;
+ struct rps_dev_flow_table *flow_table;
- if (queue->rps_map)
- call_rcu(&queue->rps_map->rcu, rps_map_release);
- if (queue->rps_flow_table)
- call_rcu(&queue->rps_flow_table->rcu,
- rps_dev_flow_table_release);
+ map = rcu_dereference_raw(queue->rps_map);
+ if (map)
+ call_rcu(&map->rcu, rps_map_release);
+
+ flow_table = rcu_dereference_raw(queue->rps_flow_table);
+ if (flow_table)
+ call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
if (atomic_dec_and_test(&first->count))
kfree(first);
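
Editor's note: the idiom in this hunk recurs throughout the series. Pointers that lockless readers traverse are annotated __rcu, and the update side fetches them with rcu_dereference_protected(ptr, lockdep_is_held(&lock)), which documents the locking requirement for sparse and lockdep without the read-side barrier of rcu_dereference(). A self-contained kernel-style sketch of both sides, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct cfg {
        int value;
        struct rcu_head rcu;
    };

    static struct cfg __rcu *cur_cfg;     /* readers dereference under RCU */
    static DEFINE_SPINLOCK(cfg_lock);     /* serializes updaters */

    static void cfg_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct cfg, rcu));
    }

    /* Update side: the spinlock, not RCU, protects this dereference. */
    static void cfg_replace(struct cfg *newc)
    {
        struct cfg *old;

        spin_lock(&cfg_lock);
        old = rcu_dereference_protected(cur_cfg,
                                        lockdep_is_held(&cfg_lock));
        rcu_assign_pointer(cur_cfg, newc);   /* publish with barrier */
        spin_unlock(&cfg_lock);

        if (old)
            call_rcu(&old->rcu, cfg_free_rcu);  /* free after grace period */
    }

    /* Read side: plain RCU access. */
    static int cfg_read(void)
    {
        struct cfg *c;
        int v = 0;

        rcu_read_lock();
        c = rcu_dereference(cur_cfg);
        if (c)
            v = c->value;
        rcu_read_unlock();
        return v;
    }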
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c988e68..3f86026 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -42,7 +42,9 @@
BUG_ON(!mutex_is_locked(&net_mutex));
BUG_ON(id == 0);
- ng = old_ng = net->gen;
+ old_ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&net_mutex));
+ ng = old_ng;
if (old_ng->len >= id)
goto assign;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2c0df0f..33bc382 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -771,10 +771,10 @@
static unsigned long num_arg(const char __user * user_buffer,
unsigned long maxlen, unsigned long *num)
{
- int i = 0;
+ int i;
*num = 0;
- for (; i < maxlen; i++) {
+ for (i = 0; i < maxlen; i++) {
char c;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
@@ -789,9 +789,9 @@
static int strn_len(const char __user * user_buffer, unsigned int maxlen)
{
- int i = 0;
+ int i;
- for (; i < maxlen; i++) {
+ for (i = 0; i < maxlen; i++) {
char c;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
@@ -846,7 +846,7 @@
{
struct seq_file *seq = file->private_data;
struct pktgen_dev *pkt_dev = seq->private;
- int i = 0, max, len;
+ int i, max, len;
char name[16], valstr[32];
unsigned long value = 0;
char *pg_result = NULL;
@@ -860,13 +860,13 @@
return -EINVAL;
}
- max = count - i;
- tmp = count_trail_chars(&user_buffer[i], max);
+ max = count;
+ tmp = count_trail_chars(user_buffer, max);
if (tmp < 0) {
pr_warning("illegal format\n");
return tmp;
}
- i += tmp;
+ i = tmp;
/* Read variable name */
@@ -887,10 +887,11 @@
i += len;
if (debug) {
- char tb[count + 1];
- if (copy_from_user(tb, user_buffer, count))
+ size_t copy = min_t(size_t, count, 1023);
+ char tb[copy + 1];
+ if (copy_from_user(tb, user_buffer, copy))
return -EFAULT;
- tb[count] = 0;
+ tb[copy] = 0;
printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
(unsigned long)count, tb);
}
@@ -1764,7 +1765,7 @@
{
struct seq_file *seq = file->private_data;
struct pktgen_thread *t = seq->private;
- int i = 0, max, len, ret;
+ int i, max, len, ret;
char name[40];
char *pg_result;
@@ -1773,12 +1774,12 @@
return -EINVAL;
}
- max = count - i;
- len = count_trail_chars(&user_buffer[i], max);
+ max = count;
+ len = count_trail_chars(user_buffer, max);
if (len < 0)
return len;
- i += len;
+ i = len;
/* Read variable name */
@@ -1975,7 +1976,7 @@
const char *ifname)
{
char b[IFNAMSIZ+5];
- int i = 0;
+ int i;
for (i = 0; ifname[i] != '@'; i++) {
if (i == IFNAMSIZ)
@@ -2519,8 +2520,8 @@
{
if (pkt_dev->cflows) {
/* let go of the SAs if we have them */
- int i = 0;
- for (; i < pkt_dev->cflows; i++) {
+ int i;
+ for (i = 0; i < pkt_dev->cflows; i++) {
struct xfrm_state *x = pkt_dev->flows[i].x;
if (x) {
xfrm_state_put(x);
@@ -2611,8 +2612,8 @@
/* Update any of the values, used when we're incrementing various
* fields.
*/
- queue_map = pkt_dev->cur_queue_map;
mod_cur_headers(pkt_dev);
+ queue_map = pkt_dev->cur_queue_map;
datalen = (odev->hard_header_len + 16) & ~0xf;
@@ -2975,8 +2976,8 @@
/* Update any of the values, used when we're incrementing various
* fields.
*/
- queue_map = pkt_dev->cur_queue_map;
mod_cur_headers(pkt_dev);
+ queue_map = pkt_dev->cur_queue_map;
skb = __netdev_alloc_skb(odev,
pkt_dev->cur_pkt_size + 64
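
Editor's note: three pktgen cleanups are folded together here. Loop counters are initialized in the for-statement rather than at declaration, queue_map is sampled after mod_cur_headers() so the packet is built with the freshly incremented queue, and the debug path no longer sizes an on-stack VLA by the user-supplied count but caps the copy at 1023 bytes first. A user-space sketch of the bounded-copy idiom (memcpy standing in for copy_from_user; the patch itself keeps a VLA but bounds it, a fixed buffer achieves the same):

    #include <stdio.h>
    #include <string.h>

    /* Debug-print at most 1023 bytes of a caller-supplied buffer;
     * never size a stack array by an unbounded, user-chosen count. */
    static void debug_dump(const char *ubuf, size_t count)
    {
        size_t copy = count < 1023 ? count : 1023;  /* hard cap */
        char tb[1024];                              /* fixed-size */

        memcpy(tb, ubuf, copy);       /* kernel: copy_from_user() */
        tb[copy] = '\0';
        printf("buffer (%zu bytes) -:%s:-\n", count, tb);
    }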
diff --git a/net/core/sock.c b/net/core/sock.c
index 11db436..fb60801 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1225,7 +1225,7 @@
sock_reset_flag(newsk, SOCK_DONE);
skb_queue_head_init(&newsk->sk_error_queue);
- filter = newsk->sk_filter;
+ filter = rcu_dereference_protected(newsk->sk_filter, 1);
if (filter != NULL)
sk_filter_charge(newsk, filter);
@@ -1653,10 +1653,10 @@
{
struct proto *prot = sk->sk_prot;
int amt = sk_mem_pages(size);
- int allocated;
+ long allocated;
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
- allocated = atomic_add_return(amt, prot->memory_allocated);
+ allocated = atomic_long_add_return(amt, prot->memory_allocated);
/* Under limit. */
if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@
/* Alas. Undo changes. */
sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
- atomic_sub(amt, prot->memory_allocated);
+ atomic_long_sub(amt, prot->memory_allocated);
return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@
{
struct proto *prot = sk->sk_prot;
- atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+ atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
prot->memory_allocated);
sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
if (prot->memory_pressure && *prot->memory_pressure &&
- (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+ (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
- seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+ seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
proto->name,
proto->obj_size,
sock_prot_inuse_get(seq_file_net(seq), proto),
- proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+ proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
proto->max_header,
proto->slab == NULL ? "no" : "yes",
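
Editor's note: proto->memory_allocated moves from atomic_t to atomic_long_t so protocol-wide memory accounting (in SK_MEM_QUANTUM units) cannot wrap on 64-bit hosts with very large socket buffers; the sysctl limit arrays become long[] and the %d format specifiers become %ld in step. A kernel-style sketch of the charge/uncharge arithmetic, simplified from __sk_mem_schedule() above (names hypothetical):

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_long_t memory_allocated;   /* quanta, protocol-wide */
    static long sysctl_mem[3];               /* low, pressure, hard marks */

    /* Charge @pages quanta; refuse (and undo) past the hard limit. */
    static bool mem_charge(long pages)
    {
        long allocated = atomic_long_add_return(pages, &memory_allocated);

        if (allocated <= sysctl_mem[0])
            return true;                     /* under the low watermark */
        if (allocated > sysctl_mem[2]) {     /* over the hard limit: undo */
            atomic_long_sub(pages, &memory_allocated);
            return false;
        }
        return true;                         /* between marks: allowed;
                                              * pressure handling elided */
    }

    static void mem_uncharge(long pages)
    {
        atomic_long_sub(pages, &memory_allocated);
    }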
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 01eee5d..385b609 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rps_sock_flow_table;
+ orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 117fb09..75c3582 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -134,13 +134,41 @@
extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
+/*
+ * Congestion control of queued data packets via CCID decision.
+ *
+ * The TX CCID performs its congestion-control by indicating whether and when a
+ * queued packet may be sent, using the return code of ccid_hc_tx_send_packet().
+ * The following modes are supported via the symbolic constants below:
+ * - timer-based pacing (CCID returns a delay value in milliseconds);
+ * - autonomous dequeueing (CCID internally schedules dccps_xmitlet).
+ */
+
+enum ccid_dequeueing_decision {
+ CCID_PACKET_SEND_AT_ONCE = 0x00000, /* "green light": no delay */
+ CCID_PACKET_DELAY_MAX = 0x0FFFF, /* maximum delay in msecs */
+ CCID_PACKET_DELAY = 0x10000, /* CCID msec-delay mode */
+ CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000, /* CCID autonomous mode */
+ CCID_PACKET_ERR = 0xF0000, /* error condition */
+};
+
+static inline int ccid_packet_dequeue_eval(const int return_code)
+{
+ if (return_code < 0)
+ return CCID_PACKET_ERR;
+ if (return_code == 0)
+ return CCID_PACKET_SEND_AT_ONCE;
+ if (return_code <= CCID_PACKET_DELAY_MAX)
+ return CCID_PACKET_DELAY;
+ return return_code;
+}
+
static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
struct sk_buff *skb)
{
- int rc = 0;
if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL)
- rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
- return rc;
+ return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
+ return CCID_PACKET_SEND_AT_ONCE;
}
static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
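
Editor's note: the old tri-state convention (negative error, zero send, positive delay) becomes an explicit decision space. Values up to 0xFFFF remain millisecond delays and the high-order bits select a mode, so ccid_packet_dequeue_eval() can fold any return code into one of four outcomes that callers switch on (see dccp_write_xmit later in this series). A stand-alone rendering of the mapping, runnable in user space:

    #include <stdio.h>

    enum ccid_dequeueing_decision {
        CCID_PACKET_SEND_AT_ONCE       = 0x00000, /* "green light" */
        CCID_PACKET_DELAY_MAX          = 0x0FFFF, /* max delay, msecs */
        CCID_PACKET_DELAY              = 0x10000, /* msec-delay mode */
        CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000, /* autonomous mode */
        CCID_PACKET_ERR                = 0xF0000, /* error condition */
    };

    static int ccid_packet_dequeue_eval(int rc)
    {
        if (rc < 0)
            return CCID_PACKET_ERR;
        if (rc == 0)
            return CCID_PACKET_SEND_AT_ONCE;
        if (rc <= CCID_PACKET_DELAY_MAX)
            return CCID_PACKET_DELAY;
        return rc;   /* already one of the symbolic modes */
    }

    int main(void)
    {
        /* -1 -> error, 0 -> send now, 250 -> delay 250 ms,
         * 0x20000 -> CCID will dequeue later by itself */
        int samples[] = { -1, 0, 250, CCID_PACKET_WILL_DEQUEUE_LATER };

        for (int i = 0; i < 4; i++)
            printf("rc=%d -> 0x%x\n", samples[i],
                   ccid_packet_dequeue_eval(samples[i]));
        return 0;
    }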
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index d850e29..6576eae 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -78,12 +78,9 @@
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
- struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_pipe < hc->tx_cwnd)
- return 0;
-
- return 1; /* XXX CCID should dequeue when ready instead of polling */
+ if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+ return CCID_PACKET_WILL_DEQUEUE_LATER;
+ return CCID_PACKET_SEND_AT_ONCE;
}
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -115,6 +112,7 @@
{
struct sock *sk = (struct sock *)data;
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -129,8 +127,6 @@
if (hc->tx_rto > DCCP_RTO_MAX)
hc->tx_rto = DCCP_RTO_MAX;
- sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
/* adjust pipe, cwnd etc */
hc->tx_ssthresh = hc->tx_cwnd / 2;
if (hc->tx_ssthresh < 2)
@@ -146,6 +142,12 @@
hc->tx_rpseq = 0;
hc->tx_rpdupack = -1;
ccid2_change_l_ack_ratio(sk, 1);
+
+ /* if we were blocked before, we may now send cwnd=1 packet */
+ if (sender_was_blocked)
+ tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ /* restart backed-off timer */
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
bh_unlock_sock(sk);
sock_put(sk);
@@ -434,6 +436,7 @@
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
u64 ackno, seqno;
struct ccid2_seq *seqp;
unsigned char *vector;
@@ -631,6 +634,10 @@
sk_stop_timer(sk, &hc->tx_rtotimer);
else
sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+
+ /* check if incoming Acks allow pending packets to be sent */
+ if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+ tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
}
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 9731c2d..25cb6b2 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -81,6 +81,11 @@
u64 tx_high_ack;
};
+static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
+{
+ return hc->tx_pipe >= hc->tx_cwnd;
+}
+
struct ccid2_hc_rx_sock {
int rx_data;
};
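
Editor's note: with ccid2_cwnd_network_limited() available, CCID-2 stops polling. The RTO and Ack paths sample the predicate before mutating cwnd/pipe and schedule dccps_xmitlet only on a blocked-to-unblocked transition. A small sketch of that edge-triggered wakeup, with hypothetical names (underflow of pipe is assumed away, as the real code tracks acked packets exactly):

    #include <stdbool.h>

    struct tx_state {
        unsigned pipe;   /* packets in flight */
        unsigned cwnd;   /* congestion window */
    };

    static bool network_limited(const struct tx_state *tx)
    {
        return tx->pipe >= tx->cwnd;
    }

    /* On an Ack: remember whether we were blocked *before* updating
     * the window, and wake the dequeue path only on the edge. */
    static void on_ack(struct tx_state *tx, unsigned acked,
                       void (*schedule_xmit)(void))
    {
        const bool was_blocked = network_limited(tx);

        tx->pipe -= acked;          /* acked packets leave the pipe */
        if (was_blocked && !network_limited(tx))
            schedule_xmit();        /* kernel: tasklet_schedule(xmitlet) */
    }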
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3060a60..3d604e1 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -268,11 +268,11 @@
sock_put(sk);
}
-/*
- * returns
- * > 0: delay (in msecs) that should pass before actually sending
- * = 0: can send immediately
- * < 0: error condition; do not send packet
+/**
+ * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
+ * @skb: next packet candidate to send on @sk
+ * This function uses the convention of ccid_packet_dequeue_eval() and
+ * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
*/
static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
@@ -348,7 +348,7 @@
/* set the nominal send time for the next following packet */
hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
- return 0;
+ return CCID_PACKET_SEND_AT_ONCE;
}
static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 3eb264b..a8ed459 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -243,8 +243,9 @@
extern void dccp_send_sync(struct sock *sk, const u64 seq,
const enum dccp_pkt_type pkt_type);
-extern void dccp_write_xmit(struct sock *sk, int block);
-extern void dccp_write_space(struct sock *sk);
+extern void dccp_write_xmit(struct sock *sk);
+extern void dccp_write_space(struct sock *sk);
+extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
extern void dccp_init_xmit_timers(struct sock *sk);
static inline void dccp_clear_xmit_timers(struct sock *sk)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index a988fe9..45b9185 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -209,108 +209,150 @@
}
/**
- * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
+ * dccp_wait_for_ccid - Await CCID send permission
* @sk: socket to wait for
- * @skb: current skb to pass on for waiting
- * @delay: sleep timeout in milliseconds (> 0)
- * This function is called by default when the socket is closed, and
- * when a non-zero linger time is set on the socket. For consistency
+ * @delay: timeout in jiffies
+ * This is used by CCIDs which need to delay the send time in process context.
*/
-static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
+static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
- struct dccp_sock *dp = dccp_sk(sk);
DEFINE_WAIT(wait);
- unsigned long jiffdelay;
- int rc;
+ long remaining;
- do {
- dccp_pr_debug("delayed send by %d msec\n", delay);
- jiffdelay = msecs_to_jiffies(delay);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ sk->sk_write_pending++;
+ release_sock(sk);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ remaining = schedule_timeout(delay);
- sk->sk_write_pending++;
- release_sock(sk);
- schedule_timeout(jiffdelay);
- lock_sock(sk);
- sk->sk_write_pending--;
-
- if (sk->sk_err)
- goto do_error;
- if (signal_pending(current))
- goto do_interrupted;
-
- rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
- } while ((delay = rc) > 0);
-out:
+ lock_sock(sk);
+ sk->sk_write_pending--;
finish_wait(sk_sleep(sk), &wait);
- return rc;
-do_error:
- rc = -EPIPE;
- goto out;
-do_interrupted:
- rc = -EINTR;
- goto out;
+ if (signal_pending(current) || sk->sk_err)
+ return -1;
+ return remaining;
}
-void dccp_write_xmit(struct sock *sk, int block)
+/**
+ * dccp_xmit_packet - Send data packet under control of CCID
+ * Transmits next-queued payload and informs CCID to account for the packet.
+ */
+static void dccp_xmit_packet(struct sock *sk)
+{
+ int err, len;
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
+
+ if (unlikely(skb == NULL))
+ return;
+ len = skb->len;
+
+ if (sk->sk_state == DCCP_PARTOPEN) {
+ const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
+ /*
+ * See 8.1.5 - Handshake Completion.
+ *
+ * For robustness we resend Confirm options until the client has
+ * entered OPEN. During the initial feature negotiation, the MPS
+ * is smaller than usual, reduced by the Change/Confirm options.
+ */
+ if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
+ DCCP_WARN("Payload too large (%d) for featneg.\n", len);
+ dccp_send_ack(sk);
+ dccp_feat_list_purge(&dp->dccps_featneg);
+ }
+
+ inet_csk_schedule_ack(sk);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ inet_csk(sk)->icsk_rto,
+ DCCP_RTO_MAX);
+ DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+ } else if (dccp_ack_pending(sk)) {
+ DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+ } else {
+ DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
+ }
+
+ err = dccp_transmit_skb(sk, skb);
+ if (err)
+ dccp_pr_debug("transmit_skb() returned err=%d\n", err);
+ /*
+ * Register this one as sent even if an error occurred. To the remote
+ * end a local packet drop is indistinguishable from network loss, i.e.
+ * any local drop will eventually be reported via receiver feedback.
+ */
+ ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+}
+
+/**
+ * dccp_flush_write_queue - Drain queue at end of connection
+ * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
+ * happen that the TX queue is not empty at the end of a connection. We give the
+ * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
+ * returns with a non-empty write queue, it will be purged later.
+ */
+void dccp_flush_write_queue(struct sock *sk, long *time_budget)
+{
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct sk_buff *skb;
+ long delay, rc;
+
+ while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
+ rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
+
+ switch (ccid_packet_dequeue_eval(rc)) {
+ case CCID_PACKET_WILL_DEQUEUE_LATER:
+ /*
+ * If the CCID determines when to send, the next sending
+ * time is unknown or the CCID may not even send again
+ * (e.g. remote host crashes or lost Ack packets).
+ */
+ DCCP_WARN("CCID did not manage to send all packets\n");
+ return;
+ case CCID_PACKET_DELAY:
+ delay = msecs_to_jiffies(rc);
+ if (delay > *time_budget)
+ return;
+ rc = dccp_wait_for_ccid(sk, delay);
+ if (rc < 0)
+ return;
+ *time_budget -= (delay - rc);
+ /* check again if we can send now */
+ break;
+ case CCID_PACKET_SEND_AT_ONCE:
+ dccp_xmit_packet(sk);
+ break;
+ case CCID_PACKET_ERR:
+ skb_dequeue(&sk->sk_write_queue);
+ kfree_skb(skb);
+ dccp_pr_debug("packet discarded due to err=%ld\n", rc);
+ }
+ }
+}
+
+void dccp_write_xmit(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_write_queue))) {
- int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
+ int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
- if (err > 0) {
- if (!block) {
- sk_reset_timer(sk, &dp->dccps_xmit_timer,
- msecs_to_jiffies(err)+jiffies);
- break;
- } else
- err = dccp_wait_for_ccid(sk, skb, err);
- if (err && err != -EINTR)
- DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
- }
-
- skb_dequeue(&sk->sk_write_queue);
- if (err == 0) {
- struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
- const int len = skb->len;
-
- if (sk->sk_state == DCCP_PARTOPEN) {
- const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
- /*
- * See 8.1.5 - Handshake Completion.
- *
- * For robustness we resend Confirm options until the client has
- * entered OPEN. During the initial feature negotiation, the MPS
- * is smaller than usual, reduced by the Change/Confirm options.
- */
- if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
- DCCP_WARN("Payload too large (%d) for featneg.\n", len);
- dccp_send_ack(sk);
- dccp_feat_list_purge(&dp->dccps_featneg);
- }
-
- inet_csk_schedule_ack(sk);
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- inet_csk(sk)->icsk_rto,
- DCCP_RTO_MAX);
- dcb->dccpd_type = DCCP_PKT_DATAACK;
- } else if (dccp_ack_pending(sk))
- dcb->dccpd_type = DCCP_PKT_DATAACK;
- else
- dcb->dccpd_type = DCCP_PKT_DATA;
-
- err = dccp_transmit_skb(sk, skb);
- ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
- if (err)
- DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
- err);
- } else {
- dccp_pr_debug("packet discarded due to err=%d\n", err);
+ switch (ccid_packet_dequeue_eval(rc)) {
+ case CCID_PACKET_WILL_DEQUEUE_LATER:
+ return;
+ case CCID_PACKET_DELAY:
+ sk_reset_timer(sk, &dp->dccps_xmit_timer,
+ jiffies + msecs_to_jiffies(rc));
+ return;
+ case CCID_PACKET_SEND_AT_ONCE:
+ dccp_xmit_packet(sk);
+ break;
+ case CCID_PACKET_ERR:
+ skb_dequeue(&sk->sk_write_queue);
kfree_skb(skb);
+ dccp_pr_debug("packet discarded due to err=%d\n", rc);
}
}
}
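
Editor's note: dccp_flush_write_queue() above spends at most *time_budget jiffies on CCID-imposed delays before teardown gives up on the remaining queue. The control flow, reduced to stubs (this compiles as a translation unit, but the extern helpers are placeholders for the socket machinery, not real APIs):

    /* Control-flow skeleton of a budgeted drain loop (illustrative). */
    enum decision { SEND_NOW, DELAY, LATER, ERR };

    struct queue;                                 /* opaque packet queue */
    extern void *queue_peek(struct queue *q);
    extern void queue_drop_head(struct queue *q);
    extern enum decision ccid_ask(void *pkt, long *delay);
    extern long wait_for(long delay);  /* time left; < 0 on signal/error */
    extern void xmit_head(struct queue *q);

    static void flush_queue(struct queue *q, long *budget)
    {
        void *pkt;
        long delay, left;

        while (*budget > 0 && (pkt = queue_peek(q)) != NULL) {
            switch (ccid_ask(pkt, &delay)) {
            case LATER:                 /* CCID may dequeue by itself */
                return;
            case DELAY:
                if (delay > *budget)    /* cannot afford the wait */
                    return;
                left = wait_for(delay);
                if (left < 0)
                    return;             /* signalled or socket error */
                *budget -= delay - left;
                break;                  /* re-ask for the same packet */
            case SEND_NOW:
                xmit_head(q);
                break;
            case ERR:
                queue_drop_head(q);     /* discard the offending packet */
                break;
            }
        }
    }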
@@ -622,7 +664,6 @@
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
if (active) {
- dccp_write_xmit(sk, 1);
dccp_skb_entail(sk, skb);
dccp_transmit_skb(sk, skb_clone(skb, prio));
/*
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7e5fc04..ef343d5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -726,7 +726,13 @@
goto out_discard;
skb_queue_tail(&sk->sk_write_queue, skb);
- dccp_write_xmit(sk,0);
+ /*
+ * The xmit_timer is set if the TX CCID is rate-based and will expire
+ * when congestion control permits further packets to be released into the
+ * network. Window-based CCIDs do not use this timer.
+ */
+ if (!timer_pending(&dp->dccps_xmit_timer))
+ dccp_write_xmit(sk);
out_release:
release_sock(sk);
return rc ? : len;
@@ -951,9 +957,22 @@
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
} else if (sk->sk_state != DCCP_CLOSED) {
+ /*
+ * Normal connection termination. May need to wait if there are
+ * still packets in the TX queue that are delayed by the CCID.
+ */
+ dccp_flush_write_queue(sk, &timeout);
dccp_terminate_connection(sk);
}
+ /*
+ * Flush write queue. This may be necessary in several cases:
+ * - we have been closed by the peer but still have application data;
+ * - abortive termination (unread data or zero linger time);
+ * - normal termination, but the queue could not be flushed within the time limit.
+ */
+ __skb_queue_purge(&sk->sk_write_queue);
+
sk_stream_wait_close(sk, timeout);
adjudge_to_death:
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1a9aa05d..7587870 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -237,32 +237,35 @@
sock_put(sk);
}
-/* Transmit-delay timer: used by the CCIDs to delay actual send time */
-static void dccp_write_xmit_timer(unsigned long data)
+/**
+ * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface
+ * See the comments above %ccid_dequeueing_decision for supported modes.
+ */
+static void dccp_write_xmitlet(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- struct dccp_sock *dp = dccp_sk(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
- sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+ sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
else
- dccp_write_xmit(sk, 0);
+ dccp_write_xmit(sk);
bh_unlock_sock(sk);
- sock_put(sk);
}
-static void dccp_init_write_xmit_timer(struct sock *sk)
+static void dccp_write_xmit_timer(unsigned long data)
{
- struct dccp_sock *dp = dccp_sk(sk);
-
- setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
- (unsigned long)sk);
+ dccp_write_xmitlet(data);
+ sock_put((struct sock *)data);
}
void dccp_init_xmit_timers(struct sock *sk)
{
- dccp_init_write_xmit_timer(sk);
+ struct dccp_sock *dp = dccp_sk(sk);
+
+ tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
+ setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
+ (unsigned long)sk);
inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
&dccp_keepalive_timer);
}
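
Editor's note: mind the reference-counting asymmetry in this rearrangement. The tasklet body can be invoked directly (tasklet_schedule() from CCID-2) or via the timer, but only the timer path took a sock reference when it was armed, so only the timer wrapper calls sock_put(). A generic kernel-style sketch of the shared-body arrangement, with hypothetical names (setup_timer as used in this era of the tree):

    #include <linux/interrupt.h>
    #include <linux/timer.h>

    struct worker_ctx {
        struct tasklet_struct let;
        struct timer_list     timer;
        /* ... object the work operates on ... */
    };

    /* Shared by both entry points; must not assume who invoked it
     * and must not drop references it did not take. */
    static void work_body(unsigned long data)
    {
        /* do the deferred work on (struct worker_ctx *)data */
    }

    /* Timer entry: pairs the reference taken when the timer was armed,
     * e.g. sock_put() in the DCCP case. */
    static void work_timer(unsigned long data)
    {
        work_body(data);
        /* drop the timer's reference here */
    }

    static void work_init(struct worker_ctx *ctx)
    {
        tasklet_init(&ctx->let, work_body, (unsigned long)ctx);
        setup_timer(&ctx->timer, work_timer, (unsigned long)ctx);
    }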
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d6b93d1..a76b78d 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -155,7 +155,7 @@
static DEFINE_RWLOCK(dn_hash_lock);
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;
-static atomic_t decnet_memory_allocated;
+static atomic_long_t decnet_memory_allocated;
static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index be3eb8e..28f8b5e 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -38,7 +38,7 @@
int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
/* Reasonable defaults, I hope, based on tcp's defaults */
-int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
+long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
@@ -324,7 +324,7 @@
.data = &sysctl_decnet_mem,
.maxlen = sizeof(sysctl_decnet_mem),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_doulongvec_minmax
},
{
.procname = "decnet_rmem",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 36e27c2..eb6f69a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1052,7 +1052,7 @@
hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
hlist_del(node);
fib_table_flush(tb);
- kfree(tb);
+ fib_free_table(tb);
}
}
kfree(net->ipv4.fib_table_hash);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 43e1c59..b3acb04 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -120,11 +120,12 @@
struct fib_node *f;
hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
- struct hlist_head __rcu *new_head;
+ struct hlist_head *new_head;
hlist_del_rcu(&f->fn_hash);
- new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+ new_head = rcu_dereference_protected(fz->fz_hash, 1) +
+ fn_hash(f->fn_key, fz);
hlist_add_head_rcu(&f->fn_hash, new_head);
}
}
@@ -179,8 +180,8 @@
memcpy(&nfz, fz, sizeof(nfz));
write_seqlock_bh(&fz->fz_lock);
- old_ht = fz->fz_hash;
- nfz.fz_hash = ht;
+ old_ht = rcu_dereference_protected(fz->fz_hash, 1);
+ RCU_INIT_POINTER(nfz.fz_hash, ht);
nfz.fz_hashmask = new_hashmask;
nfz.fz_divisor = new_divisor;
fn_rebuild_zone(&nfz, old_ht, old_divisor);
@@ -236,7 +237,7 @@
seqlock_init(&fz->fz_lock);
fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
fz->fz_hashmask = fz->fz_divisor - 1;
- fz->fz_hash = fz->fz_embedded_hash;
+ RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
fz->fz_order = z;
fz->fz_revorder = 32 - z;
fz->fz_mask = inet_make_mask(z);
@@ -272,7 +273,7 @@
for (fz = rcu_dereference(t->fn_zone_list);
fz != NULL;
fz = rcu_dereference(fz->fz_next)) {
- struct hlist_head __rcu *head;
+ struct hlist_head *head;
struct hlist_node *node;
struct fib_node *f;
__be32 k;
@@ -282,7 +283,7 @@
seq = read_seqbegin(&fz->fz_lock);
k = fz_key(flp->fl4_dst, fz);
- head = &fz->fz_hash[fn_hash(k, fz)];
+ head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
hlist_for_each_entry_rcu(f, node, head, fn_hash) {
if (f->fn_key != k)
continue;
@@ -311,6 +312,7 @@
struct fib_info *last_resort;
struct fn_hash *t = (struct fn_hash *)tb->tb_data;
struct fn_zone *fz = t->fn_zones[0];
+ struct hlist_head *head;
if (fz == NULL)
return;
@@ -320,7 +322,8 @@
order = -1;
rcu_read_lock();
- hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
+ head = rcu_dereference(fz->fz_hash);
+ hlist_for_each_entry_rcu(f, node, head, fn_hash) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
@@ -374,7 +377,7 @@
/* Insert node F to FZ. */
static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
{
- struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
hlist_add_head_rcu(&f->fn_hash, head);
}
@@ -382,7 +385,7 @@
/* Return the node in FZ matching KEY. */
static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
{
- struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
struct hlist_node *node;
struct fib_node *f;
@@ -662,7 +665,7 @@
static int fn_flush_list(struct fn_zone *fz, int idx)
{
- struct hlist_head *head = &fz->fz_hash[idx];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
struct hlist_node *node, *n;
struct fib_node *f;
int found = 0;
@@ -713,6 +716,24 @@
return found;
}
+void fib_free_table(struct fib_table *tb)
+{
+ struct fn_hash *table = (struct fn_hash *) tb->tb_data;
+ struct fn_zone *fz, *next;
+
+ next = table->fn_zone_list;
+ while (next != NULL) {
+ fz = next;
+ next = fz->fz_next;
+
+ if (fz->fz_hash != fz->fz_embedded_hash)
+ fz_hash_free(fz->fz_hash, fz->fz_divisor);
+
+ kfree(fz);
+ }
+
+ kfree(tb);
+}
static inline int
fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
@@ -761,14 +782,15 @@
struct fn_zone *fz)
{
int h, s_h;
+ struct hlist_head *head = rcu_dereference(fz->fz_hash);
- if (fz->fz_hash == NULL)
+ if (head == NULL)
return skb->len;
s_h = cb->args[3];
for (h = s_h; h < fz->fz_divisor; h++) {
- if (hlist_empty(&fz->fz_hash[h]))
+ if (hlist_empty(head + h))
continue;
- if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
+ if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
cb->args[3] = h;
return -1;
}
@@ -872,7 +894,7 @@
if (!iter->zone->fz_nent)
continue;
- iter->hash_head = iter->zone->fz_hash;
+ iter->hash_head = rcu_dereference(iter->zone->fz_hash);
maxslot = iter->zone->fz_divisor;
for (iter->bucket = 0; iter->bucket < maxslot;
@@ -957,7 +979,7 @@
goto out;
iter->bucket = 0;
- iter->hash_head = iter->zone->fz_hash;
+ iter->hash_head = rcu_dereference(iter->zone->fz_hash);
hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
list_for_each_entry(fa, &fn->fn_alias, fa_list) {
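
Editor's note: once fz_hash is __rcu, bucket access changes shape. Instead of indexing through the annotated pointer with &fz->fz_hash[idx], the pointer is dereferenced once (rcu_dereference, rtnl_dereference, or rcu_dereference_protected, depending on context) and the bucket is reached by plain pointer arithmetic. A kernel-style sketch of the reader side, with hypothetical names (caller holds rcu_read_lock()):

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct zone {
        struct hlist_head __rcu *hash;   /* points at a bucket array */
        unsigned int divisor;            /* number of buckets */
    };

    /* One annotated dereference, then ordinary indexing. */
    static struct hlist_head *zone_bucket(struct zone *z, unsigned int idx)
    {
        struct hlist_head *head = rcu_dereference(z->hash);

        return head + idx;   /* NOT &z->hash[idx]: that would index
                              * through the __rcu-annotated pointer */
    }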
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index a29edf2..c079cc0 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -47,11 +47,8 @@
static inline void fib_result_assign(struct fib_result *res,
struct fib_info *fi)
{
- if (res->fi != NULL)
- fib_info_put(res->fi);
+ /* we used to play games with refcounts, but we now use RCU */
res->fi = fi;
- if (fi != NULL)
- atomic_inc(&fi->fib_clntref);
}
#endif /* _FIB_LOOKUP_H */
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b144508..200eb53 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1797,6 +1797,11 @@
return found;
}
+void fib_free_table(struct fib_table *tb)
+{
+ kfree(tb);
+}
+
void fib_table_select_default(struct fib_table *tb,
const struct flowi *flp,
struct fib_result *res)
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index caea688..c6933f2 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -22,7 +22,7 @@
#include <net/gre.h>
-static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
static DEFINE_SPINLOCK(gre_proto_lock);
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
@@ -51,7 +51,8 @@
goto err_out;
spin_lock(&gre_proto_lock);
- if (gre_proto[version] != proto)
+ if (rcu_dereference_protected(gre_proto[version],
+ lockdep_is_held(&gre_proto_lock)) != proto)
goto err_out_unlock;
rcu_assign_pointer(gre_proto[version], NULL);
spin_unlock(&gre_proto_lock);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8877c6..3c53c2d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2306,10 +2306,8 @@
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
- if (in_dev != NULL) {
+ if (in_dev != NULL)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
- in_dev_put(in_dev);
- }
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ba80426..2ada171 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -490,9 +490,11 @@
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
- if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+ if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
struct inet_diag_entry entry;
- struct rtattr *bc = (struct rtattr *)(r + 1);
+ const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+ sizeof(*r),
+ INET_DIAG_REQ_BYTECODE);
struct inet_sock *inet = inet_sk(sk);
entry.family = sk->sk_family;
@@ -512,7 +514,7 @@
entry.dport = ntohs(inet->inet_dport);
entry.userlocks = sk->sk_userlocks;
- if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+ if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
return 0;
}
@@ -527,9 +529,11 @@
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
- if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+ if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
struct inet_diag_entry entry;
- struct rtattr *bc = (struct rtattr *)(r + 1);
+ const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+ sizeof(*r),
+ INET_DIAG_REQ_BYTECODE);
entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@
entry.dport = ntohs(tw->tw_dport);
entry.userlocks = 0;
- if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+ if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
return 0;
}
@@ -618,7 +622,7 @@
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt;
- struct rtattr *bc = NULL;
+ const struct nlattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
int j, s_j;
int reqnum, s_reqnum;
@@ -638,8 +642,9 @@
if (!lopt || !lopt->qlen)
goto out;
- if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
- bc = (struct rtattr *)(r + 1);
+ if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+ bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
+ INET_DIAG_REQ_BYTECODE);
entry.sport = inet->inet_num;
entry.userlocks = sk->sk_userlocks;
}
@@ -672,8 +677,8 @@
&ireq->rmt_addr;
entry.dport = ntohs(ireq->rmt_port);
- if (!inet_diag_bc_run(RTA_DATA(bc),
- RTA_PAYLOAD(bc), &entry))
+ if (!inet_diag_bc_run(nla_data(bc),
+ nla_len(bc), &entry))
continue;
}
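
Editor's note: inet_diag stops reaching past the request header with raw rtattr pointer arithmetic. It asks nlmsg_attrlen() whether any attribute space exists and locates the bytecode with nlmsg_find_attr(), then reads it via nla_data()/nla_len(). A hedged kernel-style sketch of that lookup (the helper name is invented):

    #include <net/netlink.h>

    /* Fetch the filter bytecode attached after a fixed request header,
     * if any; a sketch of the inet_diag conversion above. */
    static const struct nlattr *get_bytecode(const struct nlmsghdr *nlh,
                                             int hdrlen, int attrtype)
    {
        if (nlmsg_attrlen(nlh, hdrlen) == 0)
            return NULL;                 /* no attributes at all */

        return nlmsg_find_attr(nlh, hdrlen, attrtype);
    }

    /* Usage, mirroring the dump callbacks above:
     *
     *   const struct nlattr *bc =
     *       get_bytecode(nlh, sizeof(struct inet_diag_req),
     *                    INET_DIAG_REQ_BYTECODE);
     *   if (bc && !bc_run(nla_data(bc), nla_len(bc), &entry))
     *       return 0;
     */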
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9ffa24b..9e94d7c 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -72,18 +72,19 @@
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
- .avl_left = peer_avl_empty,
- .avl_right = peer_avl_empty,
+ .avl_left = peer_avl_empty_rcu,
+ .avl_right = peer_avl_empty_rcu,
.avl_height = 0
};
static struct {
- struct inet_peer *root;
+ struct inet_peer __rcu *root;
spinlock_t lock;
int total;
} peers = {
- .root = peer_avl_empty,
+ .root = peer_avl_empty_rcu,
.lock = __SPIN_LOCK_UNLOCKED(peers.lock),
.total = 0,
};
@@ -156,11 +157,14 @@
*/
#define lookup(_daddr, _stack) \
({ \
- struct inet_peer *u, **v; \
+ struct inet_peer *u; \
+ struct inet_peer __rcu **v; \
\
stackptr = _stack; \
*stackptr++ = &peers.root; \
- for (u = peers.root; u != peer_avl_empty; ) { \
+ for (u = rcu_dereference_protected(peers.root, \
+ lockdep_is_held(&peers.lock)); \
+ u != peer_avl_empty; ) { \
if (_daddr == u->v4daddr) \
break; \
if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
@@ -168,7 +172,8 @@
else \
v = &u->avl_right; \
*stackptr++ = v; \
- u = *v; \
+ u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
} \
u; \
})
@@ -209,13 +214,17 @@
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start) \
({ \
- struct inet_peer *u, **v; \
+ struct inet_peer *u; \
+ struct inet_peer __rcu **v; \
*stackptr++ = &start->avl_left; \
v = &start->avl_left; \
- for (u = *v; u->avl_right != peer_avl_empty; ) { \
+ for (u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
+ u->avl_right != peer_avl_empty_rcu; ) { \
v = &u->avl_right; \
*stackptr++ = v; \
- u = *v; \
+ u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
} \
u; \
})
@@ -224,74 +233,86 @@
* Variable names are the proof of operation correctness.
* Look into mm/map_avl.c for more detail description of the ideas.
*/
-static void peer_avl_rebalance(struct inet_peer **stack[],
- struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+ struct inet_peer __rcu ***stackend)
{
- struct inet_peer **nodep, *node, *l, *r;
+ struct inet_peer __rcu **nodep;
+ struct inet_peer *node, *l, *r;
int lh, rh;
while (stackend > stack) {
nodep = *--stackend;
- node = *nodep;
- l = node->avl_left;
- r = node->avl_right;
+ node = rcu_dereference_protected(*nodep,
+ lockdep_is_held(&peers.lock));
+ l = rcu_dereference_protected(node->avl_left,
+ lockdep_is_held(&peers.lock));
+ r = rcu_dereference_protected(node->avl_right,
+ lockdep_is_held(&peers.lock));
lh = node_height(l);
rh = node_height(r);
if (lh > rh + 1) { /* l: RH+2 */
struct inet_peer *ll, *lr, *lrl, *lrr;
int lrh;
- ll = l->avl_left;
- lr = l->avl_right;
+ ll = rcu_dereference_protected(l->avl_left,
+ lockdep_is_held(&peers.lock));
+ lr = rcu_dereference_protected(l->avl_right,
+ lockdep_is_held(&peers.lock));
lrh = node_height(lr);
if (lrh <= node_height(ll)) { /* ll: RH+1 */
- node->avl_left = lr; /* lr: RH or RH+1 */
- node->avl_right = r; /* r: RH */
+ RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
+ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = lrh + 1; /* RH+1 or RH+2 */
- l->avl_left = ll; /* ll: RH+1 */
- l->avl_right = node; /* node: RH+1 or RH+2 */
+ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */
+ RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */
l->avl_height = node->avl_height + 1;
- *nodep = l;
+ RCU_INIT_POINTER(*nodep, l);
} else { /* ll: RH, lr: RH+1 */
- lrl = lr->avl_left; /* lrl: RH or RH-1 */
- lrr = lr->avl_right; /* lrr: RH or RH-1 */
- node->avl_left = lrr; /* lrr: RH or RH-1 */
- node->avl_right = r; /* r: RH */
+ lrl = rcu_dereference_protected(lr->avl_left,
+ lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */
+ lrr = rcu_dereference_protected(lr->avl_right,
+ lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */
+ RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
+ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = rh + 1; /* node: RH+1 */
- l->avl_left = ll; /* ll: RH */
- l->avl_right = lrl; /* lrl: RH or RH-1 */
+ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */
+ RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */
l->avl_height = rh + 1; /* l: RH+1 */
- lr->avl_left = l; /* l: RH+1 */
- lr->avl_right = node; /* node: RH+1 */
+ RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */
+ RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */
lr->avl_height = rh + 2;
- *nodep = lr;
+ RCU_INIT_POINTER(*nodep, lr);
}
} else if (rh > lh + 1) { /* r: LH+2 */
struct inet_peer *rr, *rl, *rlr, *rll;
int rlh;
- rr = r->avl_right;
- rl = r->avl_left;
+ rr = rcu_dereference_protected(r->avl_right,
+ lockdep_is_held(&peers.lock));
+ rl = rcu_dereference_protected(r->avl_left,
+ lockdep_is_held(&peers.lock));
rlh = node_height(rl);
if (rlh <= node_height(rr)) { /* rr: LH+1 */
- node->avl_right = rl; /* rl: LH or LH+1 */
- node->avl_left = l; /* l: LH */
+ RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
+ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = rlh + 1; /* LH+1 or LH+2 */
- r->avl_right = rr; /* rr: LH+1 */
- r->avl_left = node; /* node: LH+1 or LH+2 */
+ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */
+ RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */
r->avl_height = node->avl_height + 1;
- *nodep = r;
+ RCU_INIT_POINTER(*nodep, r);
} else { /* rr: RH, rl: RH+1 */
- rlr = rl->avl_right; /* rlr: LH or LH-1 */
- rll = rl->avl_left; /* rll: LH or LH-1 */
- node->avl_right = rll; /* rll: LH or LH-1 */
- node->avl_left = l; /* l: LH */
+ rlr = rcu_dereference_protected(rl->avl_right,
+ lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */
+ rll = rcu_dereference_protected(rl->avl_left,
+ lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */
+ RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
+ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = lh + 1; /* node: LH+1 */
- r->avl_right = rr; /* rr: LH */
- r->avl_left = rlr; /* rlr: LH or LH-1 */
+ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */
+ RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */
r->avl_height = lh + 1; /* r: LH+1 */
- rl->avl_right = r; /* r: LH+1 */
- rl->avl_left = node; /* node: LH+1 */
+ RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */
+ RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */
rl->avl_height = lh + 2;
- *nodep = rl;
+ RCU_INIT_POINTER(*nodep, rl);
}
} else {
node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -303,10 +324,10 @@
#define link_to_pool(n) \
do { \
n->avl_height = 1; \
- n->avl_left = peer_avl_empty; \
- n->avl_right = peer_avl_empty; \
- smp_wmb(); /* lockless readers can catch us now */ \
- **--stackptr = n; \
+ n->avl_left = peer_avl_empty_rcu; \
+ n->avl_right = peer_avl_empty_rcu; \
+ /* lockless readers can catch us now */ \
+ rcu_assign_pointer(**--stackptr, n); \
peer_avl_rebalance(stack, stackptr); \
} while (0)
@@ -330,24 +351,25 @@
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer **stack[PEER_MAXDEPTH];
- struct inet_peer ***stackptr, ***delp;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
+ struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(p->v4daddr, stack) != p)
BUG();
delp = stackptr - 1; /* *delp[0] == p */
- if (p->avl_left == peer_avl_empty) {
+ if (p->avl_left == peer_avl_empty_rcu) {
*delp[0] = p->avl_right;
--stackptr;
} else {
/* look for a node to insert instead of p */
struct inet_peer *t;
t = lookup_rightempty(p);
- BUG_ON(*stackptr[-1] != t);
+ BUG_ON(rcu_dereference_protected(*stackptr[-1],
+ lockdep_is_held(&peers.lock)) != t);
**--stackptr = t->avl_left;
/* t is removed, t->v4daddr > x->v4daddr for any
* x in p->avl_left subtree.
* Put t in the old place of p. */
- *delp[0] = t;
+ RCU_INIT_POINTER(*delp[0], t);
t->avl_left = p->avl_left;
t->avl_right = p->avl_right;
t->avl_height = p->avl_height;
@@ -414,7 +436,7 @@
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
struct inet_peer *p;
- struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
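
Editor's note: the inetpeer conversion is largely mechanical, but the choice of store primitive is worth noting. rcu_assign_pointer(), which orders the node's initialization before publication, appears where a node first becomes reachable (link_to_pool), while the rotations use RCU_INIT_POINTER(), which omits the barrier because the relinked nodes are already fully visible to readers. A minimal sketch of the distinction, with hypothetical names and a single updater assumed:

    #include <linux/rcupdate.h>

    struct node {
        int key;
        struct node __rcu *next;
    };

    static struct node __rcu *list_head;

    /* Publishing a *newly initialized* node: readers must not observe
     * the pointer before the node's fields, so the barrier matters. */
    static void publish_new(struct node *n, int key)
    {
        n->key = key;
        /* n is not yet reachable: no barrier needed for its links */
        RCU_INIT_POINTER(n->next, rcu_dereference_raw(list_head));
        rcu_assign_pointer(list_head, n);  /* now reachable: barrier */
    }

    /* Relinking nodes whose contents readers could already see (as in
     * the AVL rotations above): RCU_INIT_POINTER() suffices. */
    static void relink(struct node *parent, struct node *old_child)
    {
        RCU_INIT_POINTER(parent->next, old_child);
    }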
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ffcbe..70ff77f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1072,6 +1072,7 @@
break;
}
ipgre_tunnel_unlink(ign, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
t->parms.i_key = p.i_key;
@@ -1324,7 +1325,6 @@
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
@@ -1335,7 +1335,6 @@
tunnel->hlen = sizeof(struct iphdr) + 4;
dev_hold(dev);
- rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
}
@@ -1382,10 +1381,12 @@
if ((err = register_netdev(ign->fb_tunnel_dev)))
goto err_reg_dev;
+ rcu_assign_pointer(ign->tunnels_wc[0],
+ netdev_priv(ign->fb_tunnel_dev));
return 0;
err_reg_dev:
- free_netdev(ign->fb_tunnel_dev);
+ ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64b70ad..3948c86 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -238,7 +238,7 @@
but the receiver should be clever enough, e.g. to forward mtrace requests,
sent to multicast group to reach destination designated router.
*/
-struct ip_ra_chain *ip_ra_chain;
+struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
@@ -253,7 +253,8 @@
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
{
- struct ip_ra_chain *ra, *new_ra, **rap;
+ struct ip_ra_chain *ra, *new_ra;
+ struct ip_ra_chain __rcu **rap;
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
return -EINVAL;
@@ -261,7 +262,10 @@
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock);
- for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
+ for (rap = &ip_ra_chain;
+ (ra = rcu_dereference_protected(*rap,
+ lockdep_is_held(&ip_ra_lock))) != NULL;
+ rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
spin_unlock_bh(&ip_ra_lock);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e9b816e..cd300aa 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -676,6 +676,7 @@
}
t = netdev_priv(dev);
ipip_tunnel_unlink(ipn, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3cad259..3fac340 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -927,6 +927,7 @@
private = &tmp;
}
#endif
+ memset(&info, 0, sizeof(info));
info.valid_hooks = t->valid_hooks;
memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d31b007..a846d63 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1124,6 +1124,7 @@
private = &tmp;
}
#endif
+ memset(&info, 0, sizeof(info));
info.valid_hooks = t->valid_hooks;
memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 295c974..c04787c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -47,26 +47,6 @@
return rcu_dereference(nf_nat_protos[protonum]);
}
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
- const struct nf_nat_protocol *p;
-
- rcu_read_lock();
- p = __nf_nat_proto_find(protonum);
- if (!try_module_get(p->me))
- p = &nf_nat_unknown_protocol;
- rcu_read_unlock();
-
- return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
- module_put(p->me);
-}
-
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
+static const struct nf_nat_protocol *
+nf_nat_proto_find_get(u_int8_t protonum)
+{
+ const struct nf_nat_protocol *p;
+
+ rcu_read_lock();
+ p = __nf_nat_proto_find(protonum);
+ if (!try_module_get(p->me))
+ p = &nf_nat_unknown_protocol;
+ rcu_read_unlock();
+
+ return p;
+}
+
+static void
+nf_nat_proto_put(const struct nf_nat_protocol *p)
+{
+ module_put(p->me);
+}
+
static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ae1f20..1b48eb1 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,13 @@
local_bh_enable();
socket_seq_show(seq);
- seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
+ seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
sock_prot_inuse_get(net, &tcp_prot), orphans,
tcp_death_row.tw_count, sockets,
- atomic_read(&tcp_memory_allocated));
- seq_printf(seq, "UDP: inuse %d mem %d\n",
+ atomic_long_read(&tcp_memory_allocated));
+ seq_printf(seq, "UDP: inuse %d mem %ld\n",
sock_prot_inuse_get(net, &udp_prot),
- atomic_read(&udp_memory_allocated));
+ atomic_long_read(&udp_memory_allocated));
seq_printf(seq, "UDPLITE: inuse %d\n",
sock_prot_inuse_get(net, &udplite_prot));
seq_printf(seq, "RAW: inuse %d\n",
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 65699c2..9ae5c01 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,7 +28,7 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
/*
* Add a protocol handler to the hash tables
@@ -38,7 +38,8 @@
{
int hash = protocol & (MAX_INET_PROTOS - 1);
- return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
+ return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -50,7 +51,8 @@
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
+ ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ prot, NULL) == prot) ? 0 : -1;
synchronize_net();
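
Editor's note: registration stays lockless. cmpxchg() claims a slot only if it is still NULL, deregistration releases it only if it still holds the expected handler, and synchronize_net() then guarantees no packet-path reader still uses it; the casts exist only because cmpxchg() is not __rcu-aware. A user-space rendering of the claim/release protocol using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_PROTOS 256

    struct handler { int (*rx)(void *pkt); };

    static _Atomic(const struct handler *) protos[MAX_PROTOS];

    /* Claim slot @num iff it is free; mirrors inet_add_protocol(). */
    static bool add_protocol(const struct handler *h, unsigned num)
    {
        const struct handler *expected = NULL;

        return atomic_compare_exchange_strong(
                &protos[num & (MAX_PROTOS - 1)], &expected, h);
    }

    /* Release slot @num iff it still holds @h; mirrors
     * inet_del_protocol(). The kernel then runs synchronize_net()
     * before freeing @h. */
    static bool del_protocol(const struct handler *h, unsigned num)
    {
        const struct handler *expected = h;

        return atomic_compare_exchange_strong(
                &protos[num & (MAX_PROTOS - 1)], &expected, NULL);
    }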
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d6cb2bf..987bf9ad 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -198,7 +198,7 @@
*/
struct rt_hash_bucket {
- struct rtable *chain;
+ struct rtable __rcu *chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@
struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rt_hash_table[st->bucket].chain)
+ if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
continue;
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@
{
struct rt_cache_iter_state *st = seq->private;
- r = r->dst.rt_next;
+ r = rcu_dereference_bh(r->dst.rt_next);
while (!r) {
rcu_read_unlock_bh();
do {
if (--st->bucket < 0)
return NULL;
- } while (!rt_hash_table[st->bucket].chain);
+ } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
rcu_read_lock_bh();
- r = rt_hash_table[st->bucket].chain;
+ r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
}
- return rcu_dereference_bh(r);
+ return r;
}
static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@
for (i = 0; i <= rt_hash_mask; i++) {
if (process_context && need_resched())
cond_resched();
- rth = rt_hash_table[i].chain;
+ rth = rcu_dereference_raw(rt_hash_table[i].chain);
if (!rth)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
{
- struct rtable ** prev, * p;
+ struct rtable __rcu **prev;
+ struct rtable *p;
- rth = rt_hash_table[i].chain;
+ rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ lockdep_is_held(rt_hash_lock_addr(i)));
/* defer releasing the head of the list after spin_unlock */
- for (tail = rth; tail; tail = tail->dst.rt_next)
+ for (tail = rth; tail;
+ tail = rcu_dereference_protected(tail->dst.rt_next,
+ lockdep_is_held(rt_hash_lock_addr(i))))
if (!rt_is_expired(tail))
break;
if (rth != tail)
@@ -741,8 +745,12 @@
/* call rt_free on entries after the tail requiring flush */
prev = &rt_hash_table[i].chain;
- for (p = *prev; p; p = next) {
- next = p->dst.rt_next;
+ for (p = rcu_dereference_protected(*prev,
+ lockdep_is_held(rt_hash_lock_addr(i)));
+ p != NULL;
+ p = next) {
+ next = rcu_dereference_protected(p->dst.rt_next,
+ lockdep_is_held(rt_hash_lock_addr(i)));
if (!rt_is_expired(p)) {
prev = &p->dst.rt_next;
} else {
@@ -752,14 +760,15 @@
}
}
#else
- rth = rt_hash_table[i].chain;
- rt_hash_table[i].chain = NULL;
+ rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ lockdep_is_held(rt_hash_lock_addr(i)));
+ rcu_assign_pointer(rt_hash_table[i].chain, NULL);
tail = NULL;
#endif
spin_unlock_bh(rt_hash_lock_addr(i));
for (; rth != tail; rth = next) {
- next = rth->dst.rt_next;
+ next = rcu_dereference_protected(rth->dst.rt_next, 1);
rt_free(rth);
}
}
@@ -790,7 +799,7 @@
while (aux != rth) {
if (compare_hash_inputs(&aux->fl, &rth->fl))
return 0;
- aux = aux->dst.rt_next;
+ aux = rcu_dereference_protected(aux->dst.rt_next, 1);
}
return ONE;
}
@@ -799,7 +808,8 @@
{
static unsigned int rover;
unsigned int i = rover, goal;
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
unsigned long samples = 0;
unsigned long sum = 0, sum2 = 0;
unsigned long delta;
@@ -825,11 +835,12 @@
samples++;
- if (*rthp == NULL)
+ if (rcu_dereference_raw(*rthp) == NULL)
continue;
length = 0;
spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
prefetch(rth->dst.rt_next);
if (rt_is_expired(rth)) {
*rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@
static unsigned long last_gc;
static int rover;
static int equilibrium;
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
unsigned long now = jiffies;
int goal;
int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@
k = (k + 1) & rt_hash_mask;
rthp = &rt_hash_table[k].chain;
spin_lock_bh(rt_hash_lock_addr(k));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
if (!rt_is_expired(rth) &&
!rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
@@ -1071,7 +1084,7 @@
while (rth) {
length += has_noalias(head, rth);
- rth = rth->dst.rt_next;
+ rth = rcu_dereference_protected(rth->dst.rt_next, 1);
}
return length >> FRACT_BITS;
}
@@ -1079,9 +1092,9 @@
static int rt_intern_hash(unsigned hash, struct rtable *rt,
struct rtable **rp, struct sk_buff *skb, int ifindex)
{
- struct rtable *rth, **rthp;
+ struct rtable *rth, *cand;
+ struct rtable __rcu **rthp, **candp;
unsigned long now;
- struct rtable *cand, **candp;
u32 min_score;
int chain_length;
int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@
rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
if (rt_is_expired(rth)) {
*rthp = rth->dst.rt_next;
rt_free(rth);
@@ -1324,12 +1338,14 @@
static void rt_del(unsigned hash, struct rtable *rt)
{
- struct rtable **rthp, *aux;
+ struct rtable __rcu **rthp;
+ struct rtable *aux;
rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
ip_rt_put(rt);
- while ((aux = *rthp) != NULL) {
+ while ((aux = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
if (aux == rt || rt_is_expired(aux)) {
*rthp = aux->dst.rt_next;
rt_free(aux);
@@ -1346,7 +1362,8 @@
{
int i, k;
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
__be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@
unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
rt_genid(net));
- rthp=&rt_hash_table[hash].chain;
+ rthp = &rt_hash_table[hash].chain;
while ((rth = rcu_dereference(*rthp)) != NULL) {
struct rtable *rt;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d96c1da..e91911d 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -398,7 +398,7 @@
.data = &sysctl_tcp_mem,
.maxlen = sizeof(sysctl_tcp_mem),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_doulongvec_minmax
},
{
.procname = "tcp_wmem",
@@ -602,8 +602,7 @@
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero
+ .proc_handler = proc_doulongvec_minmax,
},
{
.procname = "udp_rmem_min",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1664a05..08141996 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
-int sysctl_tcp_mem[3] __read_mostly;
+long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;
@@ -290,7 +290,7 @@
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
-atomic_t tcp_memory_allocated; /* Current allocated memory. */
+atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
/*
@@ -2246,7 +2246,7 @@
/* Values greater than interface MTU won't take effect. However
* at the point when this call is done we typically don't yet
* know which interface is going to be used */
- if (val < 8 || val > MAX_TCP_WINDOW) {
+ if (val < 64 || val > MAX_TCP_WINDOW) {
err = -EINVAL;
break;
}
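
Editor's note: the sysctl side must change in lockstep with the storage type. sysctl_tcp_mem here (and the decnet/udp equivalents earlier) are now long[3], so their table entries switch from proc_dointvec to proc_doulongvec_minmax; the udp_mem entry also drops .extra1 = &zero, since the ulongvec handler would interpret extra1 as a pointer to an unsigned long minimum rather than an int. A hedged sketch of a matching entry (name invented):

    #include <linux/sysctl.h>

    static long sysctl_example_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };

    /* The handler must match the element type: proc_dointvec for int[],
     * proc_doulongvec_minmax for long[]; mixing them corrupts values,
     * notably on 64-bit, where sizeof(long) != sizeof(int). */
    static struct ctl_table example_table[] = {
        {
            .procname     = "example_mem",
            .data         = &sysctl_example_mem,
            .maxlen       = sizeof(sysctl_example_mem),
            .mode         = 0644,
            .proc_handler = proc_doulongvec_minmax,
        },
        { }
    };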
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3357f69..6d8ab1c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -259,8 +259,11 @@
int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
sizeof(struct sk_buff);
- if (sk->sk_sndbuf < 3 * sndmem)
- sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
+ if (sk->sk_sndbuf < 3 * sndmem) {
+ sk->sk_sndbuf = 3 * sndmem;
+ if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
+ sk->sk_sndbuf = sysctl_tcp_wmem[2];
+ }
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@
if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_memory_pressure &&
- atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+ atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
sysctl_tcp_rmem[2]);
}
@@ -4861,7 +4864,7 @@
return 0;
/* If we are under soft global TCP memory pressure, do not expand. */
- if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+ if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
return 0;
/* If we filled the congestion window, do not expand. */
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 9a17bd2..ac3b3ee 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,27 +14,32 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
-static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);
-static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
}
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
- struct xfrm_tunnel **pprev;
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+
int ret = -EEXIST;
int priority = handler->priority;
mutex_lock(&tunnel4_mutex);
- for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
- if ((*pprev)->priority > priority)
+ for (pprev = fam_handlers(family);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel4_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
break;
- if ((*pprev)->priority == priority)
+ if (t->priority == priority)
goto err;
}
@@ -52,13 +57,17 @@
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
- struct xfrm_tunnel **pprev;
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
int ret = -ENOENT;
mutex_lock(&tunnel4_mutex);
- for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
- if (*pprev == handler) {
+ for (pprev = fam_handlers(family);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel4_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
*pprev = handler->next;
ret = 0;
break;
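
The tunnel4 handler chains (tunnel6 below mirrors this) gain __rcu annotations, so writers traversing them under the mutex use rcu_dereference_protected() with lockdep_is_held() to document why a plain load is safe there. A condensed sketch of the register-side walk, all names hypothetical:

	/* Sketch: mutex-protected insertion into an __rcu-annotated,
	 * priority-ordered singly linked list. */
	struct handler {
		struct handler __rcu *next;
		int priority;
	};

	static struct handler __rcu *handlers;
	static DEFINE_MUTEX(handlers_mutex);

	static int handler_register(struct handler *h)
	{
		struct handler __rcu **pprev;
		struct handler *t;
		int ret = -EEXIST;

		mutex_lock(&handlers_mutex);
		for (pprev = &handlers;
		     (t = rcu_dereference_protected(*pprev,
				lockdep_is_held(&handlers_mutex))) != NULL;
		     pprev = &t->next) {
			if (t->priority > h->priority)
				break;
			if (t->priority == h->priority)
				goto out;
		}
		rcu_assign_pointer(h->next, t);
		rcu_assign_pointer(*pprev, h); /* publish to readers */
		ret = 0;
	out:
		mutex_unlock(&handlers_mutex);
		return ret;
	}
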
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b3f7e8c..5e0a3a5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
-int sysctl_udp_mem[3] __read_mostly;
+long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);
int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@
int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);
-atomic_t udp_memory_allocated;
+atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
@@ -1413,7 +1413,7 @@
}
}
- if (sk->sk_filter) {
+ if (rcu_dereference_raw(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ec7a91d..e048ec6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -836,7 +836,7 @@
{
struct inet6_dev *idev = ifp->idev;
struct in6_addr addr, *tmpaddr;
- unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp;
+ unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
@@ -886,12 +886,13 @@
goto out;
}
memcpy(&addr.s6_addr[8], idev->rndid, 8);
+ age = (jiffies - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
- idev->cnf.temp_valid_lft);
+ idev->cnf.temp_valid_lft + age);
tmp_prefered_lft = min_t(__u32,
ifp->prefered_lft,
- idev->cnf.temp_prefered_lft -
+ idev->cnf.temp_prefered_lft + age -
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
@@ -1426,8 +1427,10 @@
{
struct inet6_dev *idev = ifp->idev;
- if (addrconf_dad_end(ifp))
+ if (addrconf_dad_end(ifp)) {
+ in6_ifa_put(ifp);
return;
+ }
if (net_ratelimit())
printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
@@ -2021,10 +2024,11 @@
ipv6_ifa_notify(0, ift);
}
- if (create && in6_dev->cnf.use_tempaddr > 0) {
+ if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
/*
* When a new public address is created as described in [ADDRCONF],
- * also create a new temporary address.
+ * also create a new temporary address. Likewise create a temporary
+ * address when use_tempaddr is enabled but none currently exists.
*/
read_unlock_bh(&in6_dev->lock);
ipv6_create_tempaddr(ifp, NULL);
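
One reading of the lifetime change above: age measures how long the public address has existed, and adding it to the configured maxima makes the clamp relative to the public address's creation time rather than to the moment a temporary address happens to be regenerated. In outline, with the field names from the hunk:

	/* Sketch: lifetimes stay anchored to the public address's
	 * timestamp, not to "now" at regeneration time. */
	unsigned long age = (jiffies - ifp->tstamp) / HZ;

	tmp_valid_lft = min_t(__u32, ifp->valid_lft,
			      idev->cnf.temp_valid_lft + age);
	tmp_prefered_lft = min_t(__u32, ifp->prefered_lft,
				 idev->cnf.temp_prefered_lft + age -
				 idev->cnf.max_desync_factor);
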
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c2c0f89..2a59610 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1284,6 +1284,7 @@
t = netdev_priv(dev);
ip6_tnl_unlink(ip6n, t);
+ synchronize_net();
err = ip6_tnl_change(t, &p);
ip6_tnl_link(ip6n, t);
netdev_state_change(dev);
@@ -1371,6 +1372,7 @@
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
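
Both tunnel ioctl paths (here and in sit.c below) insert synchronize_net() between unlinking the tunnel and rewriting its parameters, presumably so RCU readers that already looked the tunnel up drain before the addresses change under them. The shape of the pattern, annotated:

	/* Sketch: unlink, wait out RCU readers, then mutate. */
	ip6_tnl_unlink(ip6n, t);      /* no new lookups can find t  */
	synchronize_net();            /* existing readers finish    */
	err = ip6_tnl_change(t, &p);  /* now safe to rewrite parms  */
	ip6_tnl_link(ip6n, t);        /* republish the tunnel       */
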
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0553867..d1770e0 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,6 +343,10 @@
break;
case IPV6_TRANSPARENT:
+ if (!capable(CAP_NET_ADMIN)) {
+ retv = -EPERM;
+ break;
+ }
if (optlen < sizeof(int))
goto e_inval;
/* we don't have a separate transparent bit for IPv6; we use the one in the IPv4 socket */
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 44d2eea..4484648 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -5,10 +5,15 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+config NF_DEFRAG_IPV6
+ tristate
+ default n
+
config NF_CONNTRACK_IPV6
tristate "IPv6 connection tracking support"
depends on INET && IPV6 && NF_CONNTRACK
default m if NETFILTER_ADVANCED=n
+ select NF_DEFRAG_IPV6
---help---
Connection tracking keeps a record of what packets have passed
through your machine, in order to figure out how they are related
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 3f8e4a3..0a432c9 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -12,11 +12,14 @@
# objects for l3 independent conntrack
nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
# l3 independent conntrack
obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
+# defrag
+nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 51df035..4555823 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,7 @@
private = &tmp;
}
#endif
+ memset(&info, 0, sizeof(info));
info.valid_hooks = t->valid_hooks;
memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 489d71b..3a3f129 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -625,21 +625,24 @@
inet_frags_init_net(&nf_init_frags);
inet_frags_init(&nf_frags);
+#ifdef CONFIG_SYSCTL
nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
nf_ct_frag6_sysctl_table);
if (!nf_ct_frag6_sysctl_header) {
inet_frags_fini(&nf_frags);
return -ENOMEM;
}
+#endif
return 0;
}
void nf_ct_frag6_cleanup(void)
{
+#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nf_ct_frag6_sysctl_header);
nf_ct_frag6_sysctl_header = NULL;
-
+#endif
inet_frags_fini(&nf_frags);
nf_init_frags.low_thresh = 0;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index d082eae..24b3558 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -126,6 +126,8 @@
SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+ SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
+ SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_SENTINEL
};
@@ -134,6 +136,8 @@
SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+ SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
+ SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9bb936a..9a7978f 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,13 +25,14 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
int hash = protocol & (MAX_INET_PROTOS - 1);
- return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
+ return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
@@ -43,7 +44,8 @@
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
+ ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ prot, NULL) == prot) ? 0 : -1;
synchronize_net();
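
Protocol registration stays lockless here: a single cmpxchg() either claims an empty slot or fails, and the casts strip the new __rcu annotation for the primitive's benefit. A self-contained sketch of the slot-claiming idiom, with hypothetical names:

	/* Sketch: claim a protocol slot iff it is currently empty. */
	#define MAX_SLOTS 256

	struct my_proto { int id; };

	static const struct my_proto __rcu *protos[MAX_SLOTS];

	static int my_add_protocol(const struct my_proto *prot,
				   unsigned char num)
	{
		int hash = num & (MAX_SLOTS - 1);

		return !cmpxchg((const struct my_proto **)&protos[hash],
				NULL, prot) ? 0 : -1;
	}
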
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 45e6efb7..86c3952 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -373,7 +373,7 @@
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
- if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+ if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c7ba314..0f27664 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -349,7 +349,7 @@
/* Check for overlap with preceding fragment. */
if (prev &&
- (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ (FRAG6_CB(prev)->offset + prev->len) > offset)
goto discard_fq;
/* Look for overlap with succeeding segment. */
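
The rewritten overlap test avoids unsigned wraparound: prev->len is unsigned, so the whole left-hand sum is unsigned, and subtracting offset then comparing against 0 holds for every unequal value; comparing the two sides directly expresses the intended check. A minimal illustration:

	/* Sketch: why the direct comparison is needed. With an
	 * unsigned end, (end - offset > 0) is true whenever
	 * end != offset, because the subtraction wraps instead of
	 * going negative. */
	static int overlaps(unsigned int prev_end, int offset)
	{
		return prev_end > (unsigned int)offset; /* correct   */
		/* return prev_end - offset > 0;   wraps, wrong form */
	}
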
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 25661f9..fc32833 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2741,6 +2741,7 @@
kfree(net->ipv6.ip6_prohibit_entry);
kfree(net->ipv6.ip6_blk_hole_entry);
#endif
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2833,6 @@
xfrm6_fini();
fib6_gc_cleanup();
unregister_pernet_subsys(&ip6_route_net_ops);
+ dst_entries_destroy(&ip6_dst_blackhole_ops);
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 367a6cc..d6bfaec 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -963,6 +963,7 @@
}
t = netdev_priv(dev);
ipip6_tunnel_unlink(sitn, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index d986472..4f3cec1 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,23 +30,26 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
-static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
{
- struct xfrm6_tunnel **pprev;
+ struct xfrm6_tunnel __rcu **pprev;
+ struct xfrm6_tunnel *t;
int ret = -EEXIST;
int priority = handler->priority;
mutex_lock(&tunnel6_mutex);
for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
- *pprev; pprev = &(*pprev)->next) {
- if ((*pprev)->priority > priority)
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel6_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
break;
- if ((*pprev)->priority == priority)
+ if (t->priority == priority)
goto err;
}
@@ -65,14 +68,17 @@
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
{
- struct xfrm6_tunnel **pprev;
+ struct xfrm6_tunnel __rcu **pprev;
+ struct xfrm6_tunnel *t;
int ret = -ENOENT;
mutex_lock(&tunnel6_mutex);
for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
- *pprev; pprev = &(*pprev)->next) {
- if (*pprev == handler) {
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel6_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
*pprev = handler->next;
ret = 0;
break;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c84dad4..91def93 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -527,7 +527,7 @@
}
}
- if (sk->sk_filter) {
+ if (rcu_dereference_raw(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1712af1..c64ce0a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,10 @@
spinlock_t l2tp_session_hlist_lock;
};
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
BUG_ON(!net);
@@ -118,6 +122,34 @@
return net_generic(net, l2tp_net_id);
}
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+ } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+ } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
/* Session hash global list for L2TPv3.
* The session_id SHOULD be random according to RFC3931, but several
* L2TP implementations use incrementing session_ids. So we do a real
@@ -699,8 +731,8 @@
* Returns 1 if the packet was not a good data packet and could not be
* forwarded. All such packets are passed up to userspace to deal with.
*/
-int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
- int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ int (*payload_hook)(struct sk_buff *skb))
{
struct l2tp_session *session = NULL;
unsigned char *ptr, *optr;
@@ -812,7 +844,6 @@
return 1;
}
-EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
* Return codes:
@@ -922,7 +953,8 @@
return bufp - optr;
}
-int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+ size_t data_len)
{
struct l2tp_tunnel *tunnel = session->tunnel;
unsigned int len = skb->len;
@@ -970,7 +1002,6 @@
return 0;
}
-EXPORT_SYMBOL_GPL(l2tp_xmit_core);
/* Automatically called when the skb is freed.
*/
@@ -1089,7 +1120,7 @@
* The tunnel context is deleted only when all session sockets have been
* closed.
*/
-void l2tp_tunnel_destruct(struct sock *sk)
+static void l2tp_tunnel_destruct(struct sock *sk)
{
struct l2tp_tunnel *tunnel;
@@ -1128,11 +1159,10 @@
end:
return;
}
-EXPORT_SYMBOL(l2tp_tunnel_destruct);
/* When the tunnel is closed, all the attached sessions need to go too.
*/
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
int hash;
struct hlist_node *walk;
@@ -1193,12 +1223,11 @@
}
write_unlock_bh(&tunnel->hlist_lock);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
/* Really kill the tunnel.
* Come here only when all sessions have been cleared from the tunnel.
*/
-void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
@@ -1217,7 +1246,6 @@
atomic_dec(&l2tp_tunnel_count);
kfree(tunnel);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
/* Create a socket for the tunnel, if one isn't set up by
* userspace. This is used for static tunnels where there is no
@@ -1512,7 +1540,7 @@
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_len parameters are set.
*/
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1525,7 +1553,6 @@
}
}
-EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index f0f318e..a16a48e 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,48 +231,15 @@
extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
extern void l2tp_session_free(struct l2tp_session *session);
extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
-extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-extern void l2tp_tunnel_destruct(struct sock *sk);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
- } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
- } while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
/* Session reference counts. Incremented when code obtains a reference
* to a session.
*/
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 104ec3b..b8dbae8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -249,7 +249,7 @@
struct seq_file *seq;
int rc = -ENOMEM;
- pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (pd == NULL)
goto out;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 1c770c0..0bf6a59 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -576,7 +576,7 @@
return copied;
}
-struct proto l2tp_ip_prot = {
+static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.init = l2tp_ip_open,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 43288259..1534f2b 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -525,6 +525,7 @@
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
This option adds a `TPROXY' target, which is somewhat similar to
REDIRECT. It can only be used in the mangle table and is useful
@@ -927,6 +928,7 @@
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
This option adds a `socket' match, which can be used to match
packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1eacf8d..27a5ea6 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1312,7 +1312,8 @@
if (!hash) {
*vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
- hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
}
if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ed6d929..dc7bb74 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -292,6 +292,12 @@
for (i = 0; i < MAX_NF_CT_PROTO; i++)
proto_array[i] = &nf_conntrack_l4proto_generic;
+
+ /* Before making proto_array visible to lockless readers,
+ * we must make sure its content is committed to memory.
+ */
+ smp_wmb();
+
nf_ct_protos[l4proto->l3proto] = proto_array;
} else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
&nf_conntrack_l4proto_generic) {
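
The smp_wmb() orders the initialization of proto_array before the store that publishes it, so a lockless reader that observes the new pointer also observes fully initialized slots. A compact sketch of the publish side, with hypothetical names:

	/* Sketch: initialize, fence, then publish. Readers that load
	 * the pointer see the initialized contents. */
	struct my_proto { int id; };

	static struct my_proto **table;

	static int publish_table(struct my_proto *generic, int n)
	{
		struct my_proto **array;
		int i;

		array = kmalloc(n * sizeof(*array), GFP_KERNEL);
		if (!array)
			return -ENOMEM;
		for (i = 0; i < n; i++)
			array[i] = generic;

		smp_wmb();     /* commit contents before the pointer */
		table = array; /* lockless readers may now see it    */
		return 0;
	}
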
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 19c482c..640678f 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -21,7 +21,9 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
@@ -172,7 +174,7 @@
return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
static inline const struct in6_addr *
tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
@@ -372,7 +374,7 @@
.hooks = 1 << NF_INET_PRE_ROUTING,
.me = THIS_MODULE,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
{
.name = "TPROXY",
.family = NFPROTO_IPV6,
@@ -391,7 +393,7 @@
static int __init tproxy_tg_init(void)
{
nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
nf_defrag_ipv6_enable();
#endif
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 2dbd4c8..00d6ae83 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -14,7 +14,6 @@
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h>
@@ -22,7 +21,12 @@
#include <net/inet_sock.h>
#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_SOCKET_HAVE_IPV6 1
+#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
#include <linux/netfilter/xt_socket.h>
@@ -186,12 +190,12 @@
return socket_match(skb, par, par->matchinfo);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
static int
extract_icmp6_fields(const struct sk_buff *skb,
unsigned int outside_hdrlen,
- u8 *protocol,
+ int *protocol,
struct in6_addr **raddr,
struct in6_addr **laddr,
__be16 *rport,
@@ -248,8 +252,7 @@
struct sock *sk;
struct in6_addr *daddr, *saddr;
__be16 dport, sport;
- int thoff;
- u8 tproto;
+ int thoff, tproto;
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
@@ -301,7 +304,7 @@
sk = NULL;
}
- pr_debug("proto %hhu %pI6:%hu -> %pI6:%hu "
+ pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
"(orig %pI6:%hu) sock %p\n",
tproto, saddr, ntohs(sport),
daddr, ntohs(dport),
@@ -331,7 +334,7 @@
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
{
.name = "socket",
.revision = 1,
@@ -348,7 +351,7 @@
static int __init socket_mt_init(void)
{
nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
nf_defrag_ipv6_enable();
#endif
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index cd96ed3..478181d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -83,9 +83,9 @@
struct module *module;
};
-struct listeners_rcu_head {
- struct rcu_head rcu_head;
- void *ptr;
+struct listeners {
+ struct rcu_head rcu;
+ unsigned long masks[0];
};
#define NETLINK_KERNEL_SOCKET 0x1
@@ -119,7 +119,7 @@
struct netlink_table {
struct nl_pid_hash hash;
struct hlist_head mc_list;
- unsigned long *listeners;
+ struct listeners __rcu *listeners;
unsigned int nl_nonroot;
unsigned int groups;
struct mutex *cb_mutex;
@@ -338,7 +338,7 @@
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
- tbl->listeners[i] = mask;
+ tbl->listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
@@ -936,7 +936,7 @@
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
- unsigned long *listeners;
+ struct listeners *listeners;
BUG_ON(!netlink_is_kernel(sk));
@@ -944,7 +944,7 @@
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
if (group - 1 < nl_table[sk->sk_protocol].groups)
- res = test_bit(group - 1, listeners);
+ res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
@@ -1498,7 +1498,7 @@
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
- unsigned long *listeners = NULL;
+ struct listeners *listeners = NULL;
BUG_ON(!nl_table);
@@ -1523,8 +1523,7 @@
if (groups < 32)
groups = 32;
- listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
- GFP_KERNEL);
+ listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
@@ -1541,7 +1540,7 @@
netlink_table_grab();
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
- nl_table[unit].listeners = listeners;
+ rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
nl_table[unit].registered = 1;
@@ -1572,43 +1571,28 @@
EXPORT_SYMBOL(netlink_kernel_release);
-static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+static void listeners_free_rcu(struct rcu_head *head)
{
- struct listeners_rcu_head *lrh;
-
- lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
- kfree(lrh->ptr);
+ kfree(container_of(head, struct listeners, rcu));
}
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
- unsigned long *listeners, *old = NULL;
- struct listeners_rcu_head *old_rcu_head;
+ struct listeners *new, *old;
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
if (groups < 32)
groups = 32;
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
- listeners = kzalloc(NLGRPSZ(groups) +
- sizeof(struct listeners_rcu_head),
- GFP_ATOMIC);
- if (!listeners)
+ new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+ if (!new)
return -ENOMEM;
- old = tbl->listeners;
- memcpy(listeners, old, NLGRPSZ(tbl->groups));
- rcu_assign_pointer(tbl->listeners, listeners);
- /*
- * Free the old memory after an RCU grace period so we
- * don't leak it. We use call_rcu() here in order to be
- * able to call this function from atomic contexts. The
- * allocation of this memory will have reserved enough
- * space for struct listeners_rcu_head at the end.
- */
- old_rcu_head = (void *)(tbl->listeners +
- NLGRPLONGS(tbl->groups));
- old_rcu_head->ptr = old;
- call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+ old = rcu_dereference_raw(tbl->listeners);
+ memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+ rcu_assign_pointer(tbl->listeners, new);
+
+ call_rcu(&old->rcu, listeners_free_rcu);
}
tbl->groups = groups;
@@ -2104,18 +2088,17 @@
static void __init netlink_add_usersock_entry(void)
{
- unsigned long *listeners;
+ struct listeners *listeners;
int groups = 32;
- listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
- GFP_KERNEL);
+ listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
- panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+ panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
netlink_table_grab();
nl_table[NETLINK_USERSOCK].groups = groups;
- nl_table[NETLINK_USERSOCK].listeners = listeners;
+ rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
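
Restructuring listeners as a struct with an inline rcu_head and a trailing flexible array lets the grace-period free be a plain container_of(), instead of smuggling the old pointer through a separately reserved rcu head at the end of the allocation. The shape, using the names from the hunks:

	/* Sketch: RCU-freed object with a trailing flexible array. */
	struct listeners {
		struct rcu_head rcu;
		unsigned long masks[0]; /* NLGRPSZ(groups) bytes */
	};

	static void listeners_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct listeners, rcu));
	}

	/* replace-and-free, with the table grabbed: */
	new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
	memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
	rcu_assign_pointer(tbl->listeners, new);
	call_rcu(&old->rcu, listeners_free_rcu);
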
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3616f27b..0856a13 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1719,7 +1719,7 @@
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
- strlcpy(uaddr->sa_data, dev->name, 15);
+ strncpy(uaddr->sa_data, dev->name, 14);
else
memset(uaddr->sa_data, 0, 14);
rcu_read_unlock();
@@ -1742,6 +1742,7 @@
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = po->num;
+ sll->sll_pkttype = 0;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
if (dev) {
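
Both af_packet hunks close small infoleaks to userspace: sll_pkttype is explicitly zeroed, and strncpy() zero-pads the remainder of the 14-byte sa_data where strlcpy() would leave the bytes after the NUL untouched (tipc below gets a full memset for the same reason). The defensive shape, as a hypothetical helper:

	/* Sketch: zero the whole structure before filling the fields
	 * you intend to expose; padding and unset members otherwise
	 * leak kernel stack contents to userspace. */
	static void fill_sockaddr_ll(struct sockaddr_ll *sll,
				     int ifindex, __be16 proto)
	{
		memset(sll, 0, sizeof(*sll));
		sll->sll_family   = AF_PACKET;
		sll->sll_ifindex  = ifindex;
		sll->sll_protocol = proto;
	}
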
diff --git a/net/rds/loop.c b/net/rds/loop.c
index c390156..aeec1d4 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -134,8 +134,12 @@
static void rds_loop_conn_free(void *arg)
{
struct rds_loop_connection *lc = arg;
+ unsigned long flags;
+
rdsdebug("lc %p\n", lc);
+ spin_lock_irqsave(&loop_conns_lock, flags);
list_del(&lc->loop_node);
+ spin_unlock_irqrestore(&loop_conns_lock, flags);
kfree(lc);
}
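
rds_loop_conn_free() had been deleting the connection from the global list with no lock held (rds_tcp_conn_free() below had the same bug); taking the list spinlock around list_del() closes the race with code walking that list. The pattern, with hypothetical names:

	/* Sketch: removal must hold the same lock the list walkers
	 * take. */
	static LIST_HEAD(conns);
	static DEFINE_SPINLOCK(conns_lock);

	struct conn {
		struct list_head node;
	};

	static void conn_free(struct conn *c)
	{
		unsigned long flags;

		spin_lock_irqsave(&conns_lock, flags);
		list_del(&c->node);
		spin_unlock_irqrestore(&conns_lock, flags);
		kfree(c);
	}
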
diff --git a/net/rds/message.c b/net/rds/message.c
index a84545d..1fd3d29 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -224,6 +224,9 @@
WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
WARN_ON(!nents);
+ if (rm->m_used_sgs + nents > rm->m_total_sgs)
+ return NULL;
+
sg_ret = &sg_first[rm->m_used_sgs];
sg_init_table(sg_ret, nents);
rm->m_used_sgs += nents;
@@ -246,6 +249,10 @@
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
rm->data.op_nents = ceil(total_len, PAGE_SIZE);
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+ if (!rm->data.op_sg) {
+ rds_message_put(rm);
+ return ERR_PTR(-ENOMEM);
+ }
for (i = 0; i < rm->data.op_nents; ++i) {
sg_set_page(&rm->data.op_sg[i],
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 1a41deb..8920f2a 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -479,13 +479,38 @@
/*
- * Count the number of pages needed to describe an incoming iovec.
+ * Count the number of pages needed to describe an incoming iovec array.
*/
-static int rds_rdma_pages(struct rds_rdma_args *args)
+static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
+{
+ int tot_pages = 0;
+ unsigned int nr_pages;
+ unsigned int i;
+
+ /* figure out the number of pages in the vector */
+ for (i = 0; i < nr_iovecs; i++) {
+ nr_pages = rds_pages_in_vec(&iov[i]);
+ if (nr_pages == 0)
+ return -EINVAL;
+
+ tot_pages += nr_pages;
+
+ /*
+ * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
+ * so tot_pages cannot overflow without first going negative.
+ */
+ if (tot_pages < 0)
+ return -EINVAL;
+ }
+
+ return tot_pages;
+}
+
+int rds_rdma_extra_size(struct rds_rdma_args *args)
{
struct rds_iovec vec;
struct rds_iovec __user *local_vec;
- unsigned int tot_pages = 0;
+ int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
@@ -502,14 +527,16 @@
return -EINVAL;
tot_pages += nr_pages;
+
+ /*
+ * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
+ * so tot_pages cannot overflow without first going negative.
+ */
+ if (tot_pages < 0)
+ return -EINVAL;
}
- return tot_pages;
-}
-
-int rds_rdma_extra_size(struct rds_rdma_args *args)
-{
- return rds_rdma_pages(args) * sizeof(struct scatterlist);
+ return tot_pages * sizeof(struct scatterlist);
}
/*
@@ -520,13 +547,12 @@
struct cmsghdr *cmsg)
{
struct rds_rdma_args *args;
- struct rds_iovec vec;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
- struct rds_iovec __user *local_vec;
- unsigned int nr;
+ struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
+ int iov_size;
unsigned int i, j;
int ret = 0;
@@ -546,9 +572,26 @@
goto out;
}
- nr_pages = rds_rdma_pages(args);
- if (nr_pages < 0)
+ /* Check whether to allocate the iovec area */
+ iov_size = args->nr_local * sizeof(struct rds_iovec);
+ if (args->nr_local > UIO_FASTIOV) {
+ iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
+ if (!iovs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
+ ret = -EFAULT;
goto out;
+ }
+
+ nr_pages = rds_rdma_pages(iovs, args->nr_local);
+ if (nr_pages < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -564,6 +607,10 @@
op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
+ if (!op->op_sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
@@ -597,50 +644,40 @@
(unsigned long long)args->remote_vec.addr,
op->op_rkey);
- local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
-
for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec))) {
- ret = -EFAULT;
- goto out;
- }
+ struct rds_iovec *iov = &iovs[i];
+ /* don't need to check, rds_rdma_pages() verified nr will be nonzero */
+ unsigned int nr = rds_pages_in_vec(iov);
- nr = rds_pages_in_vec(&vec);
- if (nr == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- rs->rs_user_addr = vec.addr;
- rs->rs_user_bytes = vec.bytes;
+ rs->rs_user_addr = iov->addr;
+ rs->rs_user_bytes = iov->bytes;
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
- ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
+ ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
- rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
- nr_bytes, nr, vec.bytes, vec.addr);
+ rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
+ nr_bytes, nr, iov->bytes, iov->addr);
- nr_bytes += vec.bytes;
+ nr_bytes += iov->bytes;
for (j = 0; j < nr; j++) {
- unsigned int offset = vec.addr & ~PAGE_MASK;
+ unsigned int offset = iov->addr & ~PAGE_MASK;
struct scatterlist *sg;
sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
- min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
+ min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
offset);
- rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
- sg->offset, sg->length, vec.addr, vec.bytes);
+ rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
+ sg->offset, sg->length, iov->addr, iov->bytes);
- vec.addr += sg->length;
- vec.bytes -= sg->length;
+ iov->addr += sg->length;
+ iov->bytes -= sg->length;
}
op->op_nents += nr;
@@ -655,13 +692,14 @@
}
op->op_bytes = nr_bytes;
- ret = 0;
out:
+ if (iovs != iovstack)
+ sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
kfree(pages);
if (ret)
rds_rdma_free_op(op);
-
- rds_stats_inc(s_send_rdma);
+ else
+ rds_stats_inc(s_send_rdma);
return ret;
}
@@ -773,6 +811,10 @@
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
+ if (!rm->atomic.op_sg) {
+ ret = -ENOMEM;
+ goto err;
+ }
/* verify 8 byte-aligned */
if (args->local_addr & 0x7) {
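
The rewritten rds_cmsg_rdma_args() copies the whole user iovec array up front, on the stack for small counts and via sock_kmalloc() beyond UIO_FASTIOV, and rds_rdma_pages() now accumulates into a signed int so an oversized vector is caught when the total goes negative rather than silently wrapping the allocation size. A sketch of the copy-in idiom as a hypothetical helper:

	/* Sketch: copy a user iovec array with a stack fast path.
	 * sock_kmalloc() charges the allocation to the socket. */
	static int get_user_iovecs(struct sock *sk, u64 uaddr, int nr,
				   struct rds_iovec *stackbuf,
				   struct rds_iovec **iovsp, int *sizep)
	{
		struct rds_iovec *iovs = stackbuf;
		int size = nr * sizeof(struct rds_iovec);

		if (nr > UIO_FASTIOV) {
			iovs = sock_kmalloc(sk, size, GFP_KERNEL);
			if (!iovs)
				return -ENOMEM;
		}
		if (copy_from_user(iovs,
				   (void __user *)(unsigned long)uaddr,
				   size)) {
			if (iovs != stackbuf)
				sock_kfree_s(sk, iovs, size);
			return -EFAULT;
		}
		*iovsp = iovs;
		*sizep = size;
		return 0;
	}
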
diff --git a/net/rds/send.c b/net/rds/send.c
index 0bc9db1..35b9c2e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -973,6 +973,10 @@
/* Attach data to the rm */
if (payload_len) {
rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+ if (!rm->data.op_sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
if (ret)
goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 08a8c6c..8e0a320 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -221,7 +221,13 @@
static void rds_tcp_conn_free(void *arg)
{
struct rds_tcp_connection *tc = arg;
+ unsigned long flags;
rdsdebug("freeing tc %p\n", tc);
+
+ spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+ list_del(&tc->t_tcp_node);
+ spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
kmem_cache_free(rds_tcp_conn_slab, tc);
}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index efd4f95..f23d915 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -268,6 +268,10 @@
goto nla_put_failure;
nla_nest_end(skb, nest);
+
+ if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
+ goto nla_put_failure;
+
return skb->len;
nla_put_failure:
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 37dff78..d49c40f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,8 +34,6 @@
.populate = cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
.subsys_id = net_cls_subsys_id,
-#else
-#define net_cls_subsys_id net_cls_subsys.subsys_id
#endif
.module = THIS_MODULE,
};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 7632532..ea8f566 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -103,7 +103,8 @@
static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
{
- textsearch_destroy(EM_TEXT_PRIV(m)->config);
+ if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+ textsearch_destroy(EM_TEXT_PRIV(m)->config);
}
static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1ef29c7..e58f947 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -92,7 +92,7 @@
struct kmem_cache *sctp_chunk_cachep __read_mostly;
struct kmem_cache *sctp_bucket_cachep __read_mostly;
-int sysctl_sctp_mem[3];
+long sysctl_sctp_mem[3];
int sysctl_sctp_rmem[3];
int sysctl_sctp_wmem[3];
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e34ca9c..6bd5543 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -111,12 +111,12 @@
static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
extern struct kmem_cache *sctp_bucket_cachep;
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];
static int sctp_memory_pressure;
-static atomic_t sctp_memory_allocated;
+static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;
static void sctp_enter_memory_pressure(struct sock *sk)
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 832590b..50cb57f 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -54,7 +54,7 @@
static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
static int rwnd_scale_max = 16;
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];
@@ -203,7 +203,7 @@
.data = &sysctl_sctp_mem,
.maxlen = sizeof(sysctl_sctp_mem),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_doulongvec_minmax
},
{
.procname = "sctp_rmem",
diff --git a/net/socket.c b/net/socket.c
index abf3e25..2808b4d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1652,6 +1652,8 @@
struct iovec iov;
int fput_needed;
+ if (len > INT_MAX)
+ len = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
@@ -1709,6 +1711,8 @@
int err, err2;
int fput_needed;
+ if (size > INT_MAX)
+ size = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33217fc..e9f0d50 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -396,6 +396,7 @@
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct tipc_sock *tsock = tipc_sk(sock->sk);
+ memset(addr, 0, sizeof(*addr));
if (peer) {
if ((sock->state != SS_CONNECTED) &&
((peer != 2) || (sock->state != SS_DISCONNECTING)))
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 771bab0..3a8c4c4 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -134,15 +134,15 @@
case X25_FAC_CLASS_D:
switch (*p) {
case X25_FAC_CALLING_AE:
- if (p[1] > X25_MAX_DTE_FACIL_LEN)
- break;
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return 0;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
- if (p[1] > X25_MAX_DTE_FACIL_LEN)
- break;
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return 0;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
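
The facility parser now rejects class-D entries whose advertised length cannot hold an address: with p[1] a single octet, p[1] <= 1 would make the later memcpy of p[1] - 1 bytes copy zero or, after integer promotion, a huge wrapped size. Returning 0 propagates as a parse failure, which the x25_in.c change next turns into an error return instead of an skb_pull() with a bogus length. The validation shape:

	/* Sketch: validate attacker-controlled TLV lengths before
	 * using them to size a copy. */
	if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
		return 0; /* malformed facility: abort the parse */
	memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
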
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 6317896..f729f02 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -119,6 +119,8 @@
&x25->vc_facil_mask);
if (len > 0)
skb_pull(skb, len);
+ else
+ return -1;
/*
* Copy any Call User Data.
*/