mirror of https://github.com/adulau/aha.git
synced 2024-12-27 11:16:11 +00:00

commit 6cdee2f96a
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/yellowfin.c

50 changed files with 389 additions and 227 deletions
@@ -1727,12 +1727,14 @@ config KS8842
 	tristate "Micrel KSZ8842"
 	depends on HAS_IOMEM
 	help
-	  This platform driver is for Micrel KSZ8842 chip.
+	  This platform driver is for Micrel KSZ8842 / KS8842
+	  2-port ethernet switch chip (managed, VLAN, QoS).
 
 config KS8851
 	tristate "Micrel KS8851 SPI"
 	depends on SPI
 	select MII
+	select CRC32
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
@@ -1098,7 +1098,7 @@ static struct platform_driver w90p910_ether_driver = {
 	.probe		= w90p910_ether_probe,
 	.remove		= __devexit_p(w90p910_ether_remove),
 	.driver		= {
-		.name	= "w90p910-emc",
+		.name	= "nuc900-emc",
 		.owner	= THIS_MODULE,
 	},
 };
@@ -1119,5 +1119,5 @@ module_exit(w90p910_ether_exit);
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
 
@@ -952,9 +952,10 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
+	unsigned long flags;
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 out_unlock:
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	return rc;
 
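
The b44 hunks above are the first instance of a conversion repeated throughout this merge (the same change lands in fec_mpc52xx, ixpdev, macb, mlx4, smc91x, tulip, ucc_geth, via-rhine and ucc_geth's peers below): a driver's transmit handler may be entered by netpoll with hardware interrupts already disabled, so it must save and restore the caller's IRQ state rather than unconditionally re-enabling it with spin_unlock_irq(). A minimal sketch of the corrected shape, with hypothetical names (hypo_priv and hypothetical_xmit are illustrative, not from the driver):

	/* Sketch: why the irqsave variants are needed in ndo_start_xmit.
	 * hypothetical_xmit() may be called by netpoll with IRQs already off;
	 * spin_unlock_irq() would re-enable them behind netpoll's back, while
	 * spin_unlock_irqrestore() puts back whatever state the caller had. */
	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	struct hypo_priv {			/* illustrative private struct */
		spinlock_t lock;
	};

	static netdev_tx_t hypothetical_xmit(struct sk_buff *skb,
					     struct net_device *dev)
	{
		struct hypo_priv *hp = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&hp->lock, flags);	/* not spin_lock_irq() */
		/* ... queue skb to hardware ... */
		spin_unlock_irqrestore(&hp->lock, flags);
		return NETDEV_TX_OK;
	}
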
@@ -401,9 +401,11 @@ static int bnx2_unregister_cnic(struct net_device *dev)
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+	mutex_lock(&bp->cnic_lock);
 	cp->drv_state = 0;
 	bnapi->cnic_present = 0;
 	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_lock);
 	synchronize_rcu();
 	return 0;
 }
@@ -431,13 +433,13 @@ bnx2_cnic_stop(struct bnx2 *bp)
 	struct cnic_ops *c_ops;
 	struct cnic_ctl_info info;
 
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
+	mutex_lock(&bp->cnic_lock);
+	c_ops = bp->cnic_ops;
 	if (c_ops) {
 		info.cmd = CNIC_CTL_STOP_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&bp->cnic_lock);
 }
 
 static void
@@ -446,8 +448,8 @@ bnx2_cnic_start(struct bnx2 *bp)
 	struct cnic_ops *c_ops;
 	struct cnic_ctl_info info;
 
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
+	mutex_lock(&bp->cnic_lock);
+	c_ops = bp->cnic_ops;
 	if (c_ops) {
 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -457,7 +459,7 @@ bnx2_cnic_start(struct bnx2 *bp)
 		info.cmd = CNIC_CTL_START_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&bp->cnic_lock);
 }
 
 #else
@@ -7687,6 +7689,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	spin_lock_init(&bp->phy_lock);
 	spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_lock);
+#endif
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
 
@@ -6903,6 +6903,7 @@ struct bnx2 {
 	u32			idle_chk_status_idx;
 
 #ifdef BCM_CNIC
+	struct mutex		cnic_lock;
 	struct cnic_eth_dev	cnic_eth_dev;
 #endif
 
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 	return NULL;
 }
 
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 	}
 	read_unlock(&cnic_dev_lock);
 
+	atomic_set(&ulp_ops->ref_count, 0);
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 	mutex_unlock(&cnic_lock);
 
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 int cnic_unregister_driver(int ulp_type)
 {
 	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (!cnic_ulp_tbl[ulp_type]) {
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	if (!ulp_ops) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
 				    "been registered\n", ulp_type);
 		goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
 
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+		       " to zero.\n", dev->netdev->name);
 	return 0;
 
 out_unlock:
@@ -466,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver);
 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@@ -486,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 
 	synchronize_rcu();
 
+	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+	       i < 20) {
+		msleep(100);
+		i++;
+	}
+	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+		printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
+		       " to complete.\n", dev->netdev->name);
+
 	return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1101,18 +1133,23 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
 	if (cp->cnic_uinfo)
 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cp->ulp_ops[if_type];
+		if (!ulp_ops) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_start(struct cnic_dev *dev)
@@ -1120,18 +1157,23 @@ static void cnic_ulp_start(struct cnic_dev *dev)
 	struct cnic_local *cp = dev->cnic_priv;
 	int if_type;
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops || !ulp_ops->cnic_start)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cp->ulp_ops[if_type];
+		if (!ulp_ops || !ulp_ops->cnic_start) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@@ -1141,22 +1183,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 	switch (info->cmd) {
 	case CNIC_CTL_STOP_CMD:
 		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
 
 		cnic_ulp_stop(dev);
 		cnic_stop_hw(dev);
 
-		mutex_unlock(&cnic_lock);
 		cnic_put(dev);
 		break;
 	case CNIC_CTL_START_CMD:
 		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
 
 		if (!cnic_start_hw(dev))
 			cnic_ulp_start(dev);
 
-		mutex_unlock(&cnic_lock);
 		cnic_put(dev);
 		break;
 	default:
@@ -1170,19 +1208,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_init)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_init(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1190,19 +1232,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_exit)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_exit(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_cm_offload_pg(struct cnic_sock *csk)
@@ -2418,6 +2464,37 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 	return 0;
 }
 
+static int cnic_register_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (!ethdev)
+		return -ENODEV;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+		return 0;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err)
+		printk(KERN_ERR PFX "%s: register_cnic failed\n",
+		       dev->netdev->name);
+
+	return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!ethdev)
+		return;
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+}
+
 static int cnic_start_hw(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2427,13 +2504,6 @@ static int cnic_start_hw(struct cnic_dev *dev)
 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 		return -EALREADY;
 
-	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
-	if (err) {
-		printk(KERN_ERR PFX "%s: register_cnic failed\n",
-		       dev->netdev->name);
-		goto err2;
-	}
-
 	dev->regview = ethdev->io_base;
 	cp->chip_id = ethdev->chip_id;
 	pci_dev_get(dev->pcidev);
@@ -2463,18 +2533,13 @@ static int cnic_start_hw(struct cnic_dev *dev)
 	return 0;
 
 err1:
-	ethdev->drv_unregister_cnic(dev->netdev);
 	cp->free_resc(dev);
 	pci_dev_put(dev->pcidev);
-err2:
 	return err;
 }
 
 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-
 	cnic_disable_bnx2_int_sync(dev);
 
 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@@ -2486,8 +2551,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 	cnic_setup_5709_context(dev, 0);
 	cnic_free_irq(dev);
 
-	ethdev->drv_unregister_cnic(dev->netdev);
-
 	cnic_free_resc(dev);
 }
 
@@ -2568,7 +2631,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
 	probe = symbol_get(bnx2_cnic_probe);
 	if (probe) {
 		ethdev = (*probe)(dev);
-		symbol_put_addr(probe);
+		symbol_put(bnx2_cnic_probe);
 	}
 	if (!ethdev)
 		return NULL;
@@ -2671,10 +2734,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 		else if (event == NETDEV_UNREGISTER)
 			cnic_ulp_exit(dev);
 		else if (event == NETDEV_UP) {
-			mutex_lock(&cnic_lock);
+			if (cnic_register_netdev(dev) != 0) {
+				cnic_put(dev);
+				goto done;
+			}
 			if (!cnic_start_hw(dev))
 				cnic_ulp_start(dev);
-			mutex_unlock(&cnic_lock);
 		}
 
 		rcu_read_lock();
@@ -2693,10 +2758,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 		rcu_read_unlock();
 
 		if (event == NETDEV_GOING_DOWN) {
-			mutex_lock(&cnic_lock);
 			cnic_ulp_stop(dev);
 			cnic_stop_hw(dev);
-			mutex_unlock(&cnic_lock);
+			cnic_unregister_netdev(dev);
 		} else if (event == NETDEV_UNREGISTER) {
 			write_lock(&cnic_dev_lock);
 			list_del_init(&dev->list);
@@ -2728,6 +2792,7 @@ static void cnic_release(void)
 		}
 
 		cnic_ulp_exit(dev);
+		cnic_unregister_netdev(dev);
 		list_del_init(&dev->list);
 		cnic_free_dev(dev);
 	}
 
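
The cnic hunks replace rcu_read_lock()/rcu_dereference() around the ULP callbacks with the cnic mutex, a ULP_F_CALL_PENDING bit and an ops reference count, because cnic_init/cnic_start/cnic_stop may sleep and therefore cannot run inside an RCU read-side critical section. Reduced to its essentials, the pattern looks roughly like the sketch below (all hypo_* names are illustrative, not the driver's):

	/* Sketch of the pattern used above: take the mutex only to fetch the
	 * ops pointer and pin it (refcount), then drop the mutex and make the
	 * possibly-sleeping call outside any atomic context. */
	#include <linux/mutex.h>
	#include <linux/atomic.h>

	struct hypo_ops {
		atomic_t ref_count;
		void (*slow_call)(void *handle);
	};

	static DEFINE_MUTEX(hypo_lock);
	static struct hypo_ops *hypo_tbl;	/* written under hypo_lock */

	static void hypo_invoke(void *handle)
	{
		struct hypo_ops *ops;

		mutex_lock(&hypo_lock);
		ops = hypo_tbl;
		if (!ops) {
			mutex_unlock(&hypo_lock);
			return;
		}
		atomic_inc(&ops->ref_count);	/* like ulp_get() */
		mutex_unlock(&hypo_lock);

		ops->slow_call(handle);		/* may sleep: no RCU, no mutex held */

		atomic_dec(&ops->ref_count);	/* like ulp_put() */
	}
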
@@ -176,6 +176,7 @@ struct cnic_local {
 	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
 #define ULP_F_INIT	0
 #define ULP_F_START	1
+#define ULP_F_CALL_PENDING	2
 	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
 
 	/* protected by ulp_lock */
 
@@ -290,6 +290,7 @@ struct cnic_ulp_ops {
 	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
 				  char *data, u16 data_size);
 	struct module *owner;
+	atomic_t ref_count;
 };
 
 extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
 
@@ -1900,7 +1900,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 		nic->ru_running = RU_SUSPENDED;
 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
 					       sizeof(struct rfd),
-					       PCI_DMA_BIDIRECTIONAL);
+					       PCI_DMA_FROMDEVICE);
 		return -ENODATA;
 	}
 
@@ -4539,8 +4539,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		/* Allow time for pending master requests to run */
 		e1000e_disable_pcie_master(&adapter->hw);
 
-		if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
-		    !(hw->mac.ops.check_mng_mode(hw))) {
+		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
 			/* enable wakeup by the PHY */
 			retval = e1000_init_phy_wakeup(adapter, wufc);
 			if (retval)
@@ -4558,7 +4557,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	*enable_wake = !!wufc;
 
 	/* make sure adapter isn't asleep if manageability is enabled */
-	if (adapter->flags & FLAG_MNG_PT_ENABLED)
+	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
 		*enable_wake = true;
 
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4671,14 +4671,6 @@ static int e1000_resume(struct pci_dev *pdev)
 		return err;
 	}
 
-	/* AER (Advanced Error Reporting) hooks */
-	err = pci_enable_pcie_error_reporting(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-		                    "0x%x\n", err);
-		/* non-fatal, continue */
-	}
-
 	pci_set_master(pdev);
 
 	pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4991,6 +4983,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_pci_reg;
 
+	/* AER (Advanced Error Reporting) hooks */
+	err = pci_enable_pcie_error_reporting(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+		                    "0x%x\n", err);
+		/* non-fatal, continue */
+	}
+
 	pci_set_master(pdev);
 	/* PCI config space info */
 	err = pci_save_state(pdev);
 
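
The e1000e hunks move pci_enable_pcie_error_reporting() from e1000_resume() into e1000_probe(), so PCIe Advanced Error Reporting is armed once when the device is claimed instead of only after a suspend/resume cycle. A probe-side sketch of that ordering under a hypothetical driver (hypo_probe is illustrative; the call is treated as non-fatal, as in the hunk above):

	#include <linux/pci.h>
	#include <linux/aer.h>

	static int hypo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err = pci_enable_device_mem(pdev);
		if (err)
			return err;

		/* enable PCIe Advanced Error Reporting once, at probe time */
		err = pci_enable_pcie_error_reporting(pdev);
		if (err)
			dev_err(&pdev->dev, "AER enable failed: %d (non-fatal)\n", err);

		pci_set_master(pdev);
		return 0;
	}
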
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
+	unsigned long flags;
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 	dev->trans_start = jiffies;
 
 	bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return NETDEV_TX_OK;
 }
 
@@ -490,6 +490,7 @@ static int gfar_remove(struct of_device *ofdev)
 
 	dev_set_drvdata(&ofdev->dev, NULL);
 
+	unregister_netdev(dev);
 	iounmap(priv->regs);
 	free_netdev(priv->ndev);
 
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev)
 
 	free_irq(dev->emac_irq, dev);
 
+	netif_carrier_off(ndev);
+
 	return 0;
 }
 
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
 	.ndo_start_xmit		= au1k_irda_hard_xmit,
 	.ndo_tx_timeout		= au1k_tx_timeout,
 	.ndo_do_ioctl		= au1k_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int au1k_irda_net_init(struct net_device *dev)
 
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = {
 	.ndo_stop		= pxa_irda_stop,
 	.ndo_start_xmit		= pxa_irda_hard_xmit,
 	.ndo_do_ioctl		= pxa_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
 	if (!dev)
 		goto err_mem_3;
 
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	si = netdev_priv(dev);
 	si->dev = &pdev->dev;
 	si->pdata = pdev->dev.platform_data;
 
@@ -24,7 +24,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = {
 	.ndo_stop		= sa1100_irda_stop,
 	.ndo_start_xmit		= sa1100_irda_hard_xmit,
 	.ndo_do_ioctl		= sa1100_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int sa1100_irda_probe(struct platform_device *pdev)
 
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ixpdev_priv *ip = netdev_priv(dev);
 	struct ixpdev_tx_desc *desc;
 	int entry;
+	unsigned long flags;
 
 	if (unlikely(skb->len > PAGE_SIZE)) {
 		/* @@@ Count drops. */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	ip->tx_queue_entries++;
 	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
 		netif_stop_queue(dev);
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	return NETDEV_TX_OK;
 }
 
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t mapping;
 	unsigned int len, entry;
 	u32 ctrl;
+	unsigned long flags;
 
 #ifdef DEBUG
 	int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(bp) < 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		dev_err(&bp->pdev->dev,
 			"BUG! Tx Ring full when queue awake!\n");
 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	dev->trans_start = jiffies;
 
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 {
 	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+	unsigned long flags;
 
 	/* If we don't have a pending timer, set one up to catch our recent
 	   post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irq(&ring->comp_lock)) {
+		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irq(&ring->comp_lock);
+			spin_unlock_irqrestore(&ring->comp_lock, flags);
 		}
 }
 
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_enable_flags;					\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);			\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask |= (x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);		\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_disable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask &= ~(x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);		\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void  smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)					\
+#define smc_special_trylock(lock, flags)				\
 ({									\
 	int __ret;							\
-	local_irq_disable();						\
+	local_irq_save(flags);						\
 	__ret = spin_trylock(lock);					\
 	if (!__ret)							\
-		local_irq_enable();					\
+		local_irq_restore(flags);				\
 	__ret;								\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)	do { } while (0)
+#define smc_special_unlock(lock, flags)	do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
 	if (!poll_count) {
 
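
smc91x needs a trylock that also disables interrupts, and the fix threads a flags argument through its locking macros so the saved IRQ state lives in the caller's frame. Expanded into a plain function, the SMP variant of smc_special_trylock() corresponds roughly to this sketch (hypo_trylock_irqsave is an illustrative name):

	/* Sketch: what smc_special_trylock(lock, flags) expands to on SMP.
	 * IRQ state is saved before the attempt and restored on failure, so
	 * a caller that already had IRQs off never sees them re-enabled. */
	#include <linux/spinlock.h>

	static int hypo_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
	{
		local_irq_save(*flags);
		if (spin_trylock(lock))
			return 1;
		local_irq_restore(*flags);
		return 0;
	}
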
@@ -653,8 +653,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry;
 	u32 flag;
 	dma_addr_t mapping;
+	unsigned long flags;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_irqsave(&tp->lock, flags);
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = tp->cur_tx % TX_RING_SIZE;
@@ -689,7 +690,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Trigger an immediate transmit demand. */
 	iowrite32(0, tp->base_addr + CSR1);
 
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_irqrestore(&tp->lock, flags);
 
 	dev->trans_start = jiffies;
 
@@ -3084,10 +3084,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u8 __iomem *bd;			/* BD pointer */
 	u32 bd_status;
 	u8 txQ = 0;
+	unsigned long flags;
 
 	ugeth_vdbg("%s: IN", __func__);
 
-	spin_lock_irq(&ugeth->lock);
+	spin_lock_irqsave(&ugeth->lock, flags);
 
 	dev->stats.tx_bytes += skb->len;
 
@@ -3144,7 +3145,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	uccf = ugeth->uccf;
 	out_be16(uccf->p_utodr, UCC_FAST_TOD);
 #endif
-	spin_unlock_irq(&ugeth->lock);
+	spin_unlock_irqrestore(&ugeth->lock, flags);
 
 	return NETDEV_TX_OK;
 }
 
@@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
 		DEFAULT_GPIO_RESET )
 PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
 		DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+		DEFAULT_GPIO_RESET | PEGASUS_II )
 PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
 		DEFAULT_GPIO_RESET)
 PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
 
@@ -1220,6 +1220,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
+	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1263,7 +1264,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
 	/* lock eth irq */
-	spin_lock_irq(&rp->lock);
+	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 	wmb();
@@ -1282,7 +1283,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 
 	dev->trans_start = jiffies;
 
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_irqrestore(&rp->lock, flags);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
 
@@ -1789,7 +1789,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
 		 *	mode
 		 */
 		if (vptr->rev_id < REV_ID_VT3216_A0) {
-			if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
+			if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
 				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
 			else
 				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
 
@@ -70,6 +70,9 @@ struct virtnet_info
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
@@ -273,19 +276,22 @@ drop:
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
 				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-				f->page = get_a_page(vi, GFP_ATOMIC);
+				f->page = get_a_page(vi, gfp);
 				if (!f->page)
 					break;
 
@@ -325,31 +331,35 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
@@ -894,6 +922,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -942,7 +971,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -959,6 +988,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -987,6 +1017,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 
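
The virtio_net hunks make the NAPI-path refill allocate with GFP_ATOMIC and, when that fails, hand retrying off to a delayed work item that may sleep and use GFP_KERNEL. A stripped-down sketch of that fallback, with hypothetical names (hypo_rx, hypo_refill_work):

	/* Sketch of the OOM-refill fallback: the fast path tries an atomic
	 * allocation; if that fails, a workqueue item retries later in
	 * process context where GFP_KERNEL can sleep and reclaim. */
	#include <linux/workqueue.h>
	#include <linux/gfp.h>

	struct hypo_rx {
		struct delayed_work refill;
		bool (*fill)(struct hypo_rx *rx, gfp_t gfp);	/* false on OOM */
	};

	static void hypo_refill_work(struct work_struct *work)
	{
		struct hypo_rx *rx = container_of(work, struct hypo_rx, refill.work);

		if (!rx->fill(rx, GFP_KERNEL))			/* may sleep */
			schedule_delayed_work(&rx->refill, HZ / 2);	/* retry later */
	}

	static void hypo_rx_poll(struct hypo_rx *rx)
	{
		if (!rx->fill(rx, GFP_ATOMIC))			/* softirq context */
			schedule_delayed_work(&rx->refill, 0);
	}
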
@@ -2893,45 +2893,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
 	return 0;
 }
 
-static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
-				 u32 src_phys, u32 dest_address, u32 length)
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+				 int nr, u32 dest_address, u32 len)
 {
-	u32 bytes_left = length;
-	u32 src_offset = 0;
-	u32 dest_offset = 0;
-	int status = 0;
+	int ret, i;
+	u32 size;
 
 	IPW_DEBUG_FW(">> \n");
-	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
-			  src_phys, dest_address, length);
-	while (bytes_left > CB_MAX_LENGTH) {
-		status = ipw_fw_dma_add_command_block(priv,
-						      src_phys + src_offset,
-						      dest_address +
-						      dest_offset,
-						      CB_MAX_LENGTH, 0, 0);
-		if (status) {
+	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+			  nr, dest_address, len);
+
+	for (i = 0; i < nr; i++) {
+		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+						   dest_address +
+						   i * CB_MAX_LENGTH, size,
+						   0, 0);
+		if (ret) {
 			IPW_DEBUG_FW_INFO(": Failed\n");
 			return -1;
 		} else
 			IPW_DEBUG_FW_INFO(": Added new cb\n");
-
-		src_offset += CB_MAX_LENGTH;
-		dest_offset += CB_MAX_LENGTH;
-		bytes_left -= CB_MAX_LENGTH;
-	}
-
-	/* add the buffer tail */
-	if (bytes_left > 0) {
-		status =
-		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
-						 dest_address + dest_offset,
-						 bytes_left, 0, 0);
-		if (status) {
-			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
-			return -1;
-		} else
-			IPW_DEBUG_FW_INFO
-			    (": Adding new cb - the buffer tail\n");
 	}
 
 	IPW_DEBUG_FW("<< \n");
@@ -3179,59 +3161,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
 
 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
 {
-	int rc = -1;
+	int ret = -1;
 	int offset = 0;
 	struct fw_chunk *chunk;
-	dma_addr_t shared_phys;
-	u8 *shared_virt;
+	int total_nr = 0;
+	int i;
+	struct pci_pool *pool;
+	u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
+	dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
 
 	IPW_DEBUG_TRACE("<< : \n");
-	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
 
-	if (!shared_virt)
+	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
+	if (!pool) {
+		IPW_ERROR("pci_pool_create failed\n");
 		return -ENOMEM;
-
-	memmove(shared_virt, data, len);
+	}
 
 	/* Start the Dma */
-	rc = ipw_fw_dma_enable(priv);
+	ret = ipw_fw_dma_enable(priv);
 
 	/* the DMA is already ready this would be a bug. */
 	BUG_ON(priv->sram_desc.last_cb_index > 0);
 
 	do {
+		u32 chunk_len;
+		u8 *start;
+		int size;
+		int nr = 0;
+
 		chunk = (struct fw_chunk *)(data + offset);
 		offset += sizeof(struct fw_chunk);
+		chunk_len = le32_to_cpu(chunk->length);
+		start = data + offset;
+
+		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+		for (i = 0; i < nr; i++) {
+			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
+							 &phys[total_nr]);
+			if (!virts[total_nr]) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+				     CB_MAX_LENGTH);
+			memcpy(virts[total_nr], start, size);
+			start += size;
+			total_nr++;
+			/* We don't support fw chunk larger than 64*8K */
+			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+		}
+
 		/* build DMA packet and queue up for sending */
 		/* dma to chunk->address, the chunk->length bytes from data +
 		 * offeset*/
 		/* Dma loading */
-		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
-					   le32_to_cpu(chunk->address),
-					   le32_to_cpu(chunk->length));
-		if (rc) {
+		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+					    nr, le32_to_cpu(chunk->address),
+					    chunk_len);
+		if (ret) {
 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
 			goto out;
 		}
 
-		offset += le32_to_cpu(chunk->length);
+		offset += chunk_len;
 	} while (offset < len);
 
 	/* Run the DMA and wait for the answer */
-	rc = ipw_fw_dma_kick(priv);
-	if (rc) {
+	ret = ipw_fw_dma_kick(priv);
+	if (ret) {
 		IPW_ERROR("dmaKick Failed\n");
 		goto out;
 	}
 
-	rc = ipw_fw_dma_wait(priv);
-	if (rc) {
+	ret = ipw_fw_dma_wait(priv);
+	if (ret) {
 		IPW_ERROR("dmaWaitSync Failed\n");
 		goto out;
 	}
-out:
-	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
-	return rc;
+out:
+	for (i = 0; i < total_nr; i++)
+		pci_pool_free(pool, virts[i], phys[i]);
+
+	pci_pool_destroy(pool);
+
+	return ret;
 }
 
 /* stop nic */
 
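
The ipw2200 rework drops the single large pci_alloc_consistent() of the whole firmware image, which can fail when a big physically contiguous buffer is unavailable, and instead stages the image in CB_MAX_LENGTH pieces drawn from a pci_pool, one DMA command block per piece. A rough sketch of the staging loop, under illustrative names and sizes (hypo_stage_fw, CHUNK, MAX_CHUNKS):

	/* Sketch: chunked DMA staging through a pci_pool instead of one
	 * large coherent allocation. CHUNK and MAX_CHUNKS are illustrative. */
	#include <linux/kernel.h>
	#include <linux/pci.h>
	#include <linux/dmapool.h>
	#include <linux/string.h>

	#define CHUNK      0x2000	/* stand-in for CB_MAX_LENGTH */
	#define MAX_CHUNKS 64

	static int hypo_stage_fw(struct pci_dev *pdev, const u8 *data, size_t len)
	{
		struct pci_pool *pool;
		void *virt[MAX_CHUNKS];
		dma_addr_t phys[MAX_CHUNKS];
		int i, nr = 0, ret = 0;

		pool = pci_pool_create("hypo-fw", pdev, CHUNK, 0, 0);
		if (!pool)
			return -ENOMEM;

		for (; len && nr < MAX_CHUNKS; nr++) {
			size_t size = min_t(size_t, len, CHUNK);

			virt[nr] = pci_pool_alloc(pool, GFP_KERNEL, &phys[nr]);
			if (!virt[nr]) {
				ret = -ENOMEM;
				break;
			}
			memcpy(virt[nr], data, size);	/* queue phys[nr] for DMA here */
			data += size;
			len -= size;
		}

		for (i = 0; i < nr; i++)
			pci_pool_free(pool, virt[i], phys[i]);
		pci_pool_destroy(pool);
		return ret;
	}
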
@@ -644,7 +644,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
 	int err = 0;
 	u8 tsc_arr[4][ORINOCO_SEQ_LEN];
 
-	if ((key < 0) || (key > 4))
+	if ((key < 0) || (key >= 4))
 		return -EINVAL;
 
 	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
 
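
The orinoco change is a plain off-by-one: tsc_arr has four rows, valid indices 0 through 3, so key == 4 must be rejected. The general guard for an N-element array, as a tiny sketch:

	/* Bounds check for an N-element array: index 4 into tsc_arr[4][...]
	 * is one past the end, so the correct rejection is key >= 4. */
	#define N 4
	static int check_key(int key)
	{
		return (key < 0 || key >= N) ? -22 /* -EINVAL */ : 0;
	}
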
@@ -871,6 +871,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
 	priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
 	rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
 
+	/* ENEDCA flag must always be set, transmit issues? */
+	rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+
 	return 0;
 }
 
@@ -1176,13 +1179,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
 				rtl818x_iowrite8(priv, &priv->map->BSSID[i],
 						 info->bssid[i]);
 
+		if (priv->is_rtl8187b)
+			reg = RTL818X_MSR_ENEDCA;
+		else
+			reg = 0;
+
 		if (is_valid_ether_addr(info->bssid)) {
-			reg = RTL818X_MSR_INFRA;
-			if (priv->is_rtl8187b)
-				reg |= RTL818X_MSR_ENEDCA;
+			reg |= RTL818X_MSR_INFRA;
 			rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 		} else {
-			reg = RTL818X_MSR_NO_LINK;
+			reg |= RTL818X_MSR_NO_LINK;
 			rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 		}
 
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(unsigned long data);
 static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 					struct net_device *dev);
 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
@@ -574,19 +574,24 @@ static int yellowfin_open(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
 	void __iomem *ioaddr = yp->base;
-	int i;
+	int i, ret;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (i) return i;
+	ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (ret)
+		return ret;
 
 	if (yellowfin_debug > 1)
 		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
 		       dev->name, dev->irq);
 
-	yellowfin_init_ring(dev);
+	ret = yellowfin_init_ring(dev);
+	if (ret) {
+		free_irq(dev->irq, dev);
+		return ret;
+	}
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -726,10 +731,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
 }
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
-	int i;
+	int i, j;
 
 	yp->tx_full = 0;
 	yp->cur_rx = yp->cur_tx = 0;
@@ -754,6 +759,11 @@ static int yellowfin_init_ring(struct net_device *dev)
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 	}
+	if (i != RX_RING_SIZE) {
+		for (j = 0; j < i; j++)
+			dev_kfree_skb(yp->rx_skbuff[j]);
+		return -ENOMEM;
+	}
 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -770,8 +780,6 @@ static int yellowfin_init_ring(struct net_device *dev)
 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 #else
 {
-	int j;
-
 	/* Tx ring needs a pair of descriptors, the second for the status. */
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		j = 2*i;
@@ -806,7 +814,7 @@ static int yellowfin_init_ring(struct net_device *dev)
 	}
 #endif
 	yp->tx_tail_desc = &yp->tx_status[0];
-	return;
+	return 0;
 }
 
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 
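
yellowfin_init_ring() now reports RX buffer allocation failure instead of leaving holes in the ring, and yellowfin_open() frees the IRQ and propagates the error. The unwind idiom, sketched with illustrative names (hypo_init_ring, RING_SIZE):

	/* Sketch: partial-failure unwind when populating an RX ring.
	 * On the first failed allocation, free what was already allocated
	 * and report -ENOMEM to the caller. RING_SIZE is illustrative. */
	#include <linux/skbuff.h>
	#include <linux/errno.h>

	#define RING_SIZE 64

	static int hypo_init_ring(struct sk_buff *ring[RING_SIZE], unsigned int buf_sz)
	{
		int i, j;

		for (i = 0; i < RING_SIZE; i++) {
			ring[i] = dev_alloc_skb(buf_sz);
			if (!ring[i])
				break;
		}
		if (i != RING_SIZE) {
			for (j = 0; j < i; j++)
				dev_kfree_skb(ring[j]);
			return -ENOMEM;
		}
		return 0;
	}
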
@@ -19,6 +19,11 @@ enum {
  * @packets: number of seen packets
  */
 struct gnet_stats_basic
 {
 	__u64	bytes;
 	__u32	packets;
 };
+struct gnet_stats_basic_packed
+{
+	__u64	bytes;
+	__u32	packets;
+} __attribute__ ((packed));
 
@@ -16,7 +16,7 @@ struct tcf_common {
 	u32			tcfc_capab;
 	int			tcfc_action;
 	struct tcf_t		tcfc_tm;
-	struct gnet_stats_basic	tcfc_bstats;
+	struct gnet_stats_basic_packed	tcfc_bstats;
 	struct gnet_stats_queue	tcfc_qstats;
 	struct gnet_stats_rate_est	tcfc_rate_est;
 	spinlock_t		tcfc_lock;
 
@@ -28,7 +28,7 @@ extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 					spinlock_t *lock, struct gnet_dump *d);
 
 extern int gnet_stats_copy_basic(struct gnet_dump *d,
-				 struct gnet_stats_basic *b);
+				 struct gnet_stats_basic_packed *b);
 extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
 				    struct gnet_stats_rate_est *r);
 extern int gnet_stats_copy_queue(struct gnet_dump *d,
@@ -37,14 +37,14 @@ extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
 extern int gnet_stats_finish_copy(struct gnet_dump *d);
 
-extern int gen_new_estimator(struct gnet_stats_basic *bstats,
+extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 			     struct gnet_stats_rate_est *rate_est,
 			     spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic *bstats,
+extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			       struct gnet_stats_rate_est *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic *bstats,
+extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
 				 struct gnet_stats_rate_est *rate_est,
 				 spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 				 const struct gnet_stats_rate_est *rate_est);
 #endif
 
@@ -8,7 +8,7 @@ struct xt_rateest {
 	spinlock_t			lock;
 	struct gnet_estimator		params;
 	struct gnet_stats_rate_est	rstats;
-	struct gnet_stats_basic		bstats;
+	struct gnet_stats_basic_packed	bstats;
 };
 
 extern struct xt_rateest *xt_rateest_lookup(const char *name);
 
@@ -73,7 +73,7 @@ struct Qdisc
 	 */
 	unsigned long		state;
 	struct sk_buff_head	q;
-	struct gnet_stats_basic	bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue	qstats;
 };
 
@@ -81,7 +81,7 @@
 struct gen_estimator
 {
 	struct list_head	list;
-	struct gnet_stats_basic	*bstats;
+	struct gnet_stats_basic_packed	*bstats;
 	struct gnet_stats_rate_est	*rate_est;
 	spinlock_t		*stats_lock;
 	int			ewma_log;
@@ -165,7 +165,7 @@ static void gen_add_node(struct gen_estimator *est)
 }
 
 static
-struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
+struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
 				    const struct gnet_stats_rate_est *rate_est)
 {
 	struct rb_node *p = est_root.rb_node;
@@ -202,7 +202,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
  *
  * NOTE: Called under rtnl_mutex
  */
-int gen_new_estimator(struct gnet_stats_basic *bstats,
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
 		      spinlock_t *stats_lock,
 		      struct nlattr *opt)
@@ -262,7 +262,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * NOTE: Called under rtnl_mutex
  */
-void gen_kill_estimator(struct gnet_stats_basic *bstats,
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
  *
  * Returns 0 on success or a negative error code.
  */
-int gen_replace_estimator(struct gnet_stats_basic *bstats,
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
 			  struct gnet_stats_rate_est *rate_est,
 			  spinlock_t *stats_lock, struct nlattr *opt)
 {
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(gen_replace_estimator);
  *
  * Returns true if estimator is active, and false if not.
  */
-bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
 	ASSERT_RTNL();
 
@@ -106,16 +106,21 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic *b)
+gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
 {
 	if (d->compat_tc_stats) {
 		d->tc_stats.bytes = b->bytes;
 		d->tc_stats.packets = b->packets;
 	}
 
-	if (d->tail)
-		return gnet_stats_copy(d, TCA_STATS_BASIC, b, sizeof(*b));
+	if (d->tail) {
+		struct gnet_stats_basic sb;
 
+		memset(&sb, 0, sizeof(sb));
+		sb.bytes = b->bytes;
+		sb.packets = b->packets;
+		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
+	}
 	return 0;
 }
 
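
The gnet_stats churn above exists because struct gnet_stats_basic (a __u64 followed by a __u32) picks up tail padding on 64-bit ABIs, while the netlink TCA_STATS_BASIC attribute is defined by the unpacked layout; the kernel therefore keeps a packed variant internally, and gnet_stats_copy_basic() copies through a zeroed stack struct so user space sees the unchanged layout without leaked padding bytes. A runnable user-space sketch of the size difference and the bridging copy (on a typical LP64 ABI the printed sizes are 16 and 12):

	/* Sketch: a packed internal counter pair vs. the padded struct
	 * exported over netlink. */
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct stats_wire {		/* what user space sees */
		uint64_t bytes;
		uint32_t packets;
	};

	struct stats_packed {		/* kernel-internal, no padding */
		uint64_t bytes;
		uint32_t packets;
	} __attribute__((packed));

	int main(void)
	{
		struct stats_packed internal = { 123456, 789 };
		struct stats_wire wire;

		memset(&wire, 0, sizeof(wire));	/* don't leak padding bytes */
		wire.bytes = internal.bytes;
		wire.packets = internal.packets;

		printf("wire=%zu packed=%zu\n", sizeof(wire), sizeof(internal));
		return 0;
	}
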
@@ -323,6 +323,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 			udelay(USEC_PER_POLL);
 		}
+
+		WARN_ONCE(!irqs_disabled(),
+			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+			dev->name, ops->ndo_start_xmit);
+
 		local_irq_restore(flags);
 	}
 
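
This WARN_ONCE is the enforcement side of the spin_lock_irq() conversions earlier in the merge: netpoll invokes ndo_start_xmit with interrupts disabled, and a handler that re-enables them now triggers a one-time warning naming the offending function via the %pF printk format. The assertion shape, as a minimal sketch (hypo_after_xmit is illustrative):

	/* Sketch: warn once, with the function pointer resolved to a symbol
	 * name by %pF, then carry on. */
	#include <linux/kernel.h>
	#include <linux/irqflags.h>

	static void hypo_after_xmit(const char *name, void *handler)
	{
		WARN_ONCE(!irqs_disabled(),
			  "%s re-enabled IRQs in its xmit handler (%pF)\n",
			  name, handler);
	}
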
@@ -1035,6 +1035,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		sk->sk_prot = sk->sk_prot_creator = prot;
 		sock_lock_init(sk);
 		sock_net_set(sk, get_net(net));
+		atomic_set(&sk->sk_wmem_alloc, 1);
 	}
 
 	return sk;
@@ -1882,7 +1883,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	 */
 	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
-	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
 
@@ -951,7 +951,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 			addend += 4;
 	}
 	dev->needed_headroom = addend + hlen;
-	mtu -= dev->hard_header_len - addend;
+	mtu -= dev->hard_header_len + addend;
 
 	if (mtu < 68)
 		mtu = 68;
 
@@ -306,8 +306,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 			    v4addr != htonl(INADDR_ANY) &&
 			    chk_addr_ret != RTN_LOCAL &&
 			    chk_addr_ret != RTN_MULTICAST &&
-			    chk_addr_ret != RTN_BROADCAST)
+			    chk_addr_ret != RTN_BROADCAST) {
+				err = -EADDRNOTAVAIL;
 				goto out;
+			}
 		} else {
 			if (addr_type != IPV6_ADDR_ANY) {
 				struct net_device *dev = NULL;
 
@@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
 	struct llc_sock *llc = llc_sk(sk);
 	int rc = 0;
 
+	memset(&sllc, 0, sizeof(sllc));
 	lock_sock(sk);
 	if (sock_flag(sk, SOCK_ZAPPED))
 		goto out;
 
@@ -74,7 +74,7 @@ static unsigned int
 xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
 {
 	const struct xt_rateest_target_info *info = par->targinfo;
-	struct gnet_stats_basic *stats = &info->est->bstats;
+	struct gnet_stats_basic_packed *stats = &info->est->bstats;
 
 	spin_lock_bh(&info->est->lock);
 	stats->bytes += skb->len;
 
@@ -52,7 +52,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
 
 	q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
 	if (q->master == NULL)
-		return -ENOMEM;
+		return false;
 
 	q->master->quota = q->quota;
 	return true;
 
@@ -630,23 +630,23 @@ out:
 	return dev;
 }
 
-static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
+static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
+	ax25_address *digipeaters)
 {
-	static ax25_digi ax25_digi;
 	int i;
 
 	if (ndigis == 0)
 		return NULL;
 
 	for (i = 0; i < ndigis; i++) {
-		ax25_digi.calls[i]    = digipeaters[i];
-		ax25_digi.repeated[i] = 0;
+		digi->calls[i]    = digipeaters[i];
+		digi->repeated[i] = 0;
 	}
 
-	ax25_digi.ndigi      = ndigis;
-	ax25_digi.lastrepeat = -1;
+	digi->ndigi      = ndigis;
+	digi->lastrepeat = -1;
 
-	return &ax25_digi;
+	return digi;
 }
 
 /*
@@ -656,6 +656,7 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
 {
 	struct nr_route_struct nr_route;
 	struct net_device *dev;
+	ax25_digi digi;
 	int ret;
 
 	switch (cmd) {
@@ -673,13 +674,15 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
 			ret = nr_add_node(&nr_route.callsign,
 				nr_route.mnemonic,
 				&nr_route.neighbour,
-				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
+				nr_call_to_digi(&digi, nr_route.ndigis,
+						nr_route.digipeaters),
 				dev, nr_route.quality,
 				nr_route.obs_count);
 			break;
 		case NETROM_NEIGH:
 			ret = nr_add_neigh(&nr_route.callsign,
-				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
+				nr_call_to_digi(&digi, nr_route.ndigis,
+						nr_route.digipeaters),
 				dev, nr_route.quality);
 			break;
 		default:
 
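
The netrom change fixes a reentrancy bug: the old nr_call_to_digi() handed every caller the same static ax25_digi, so concurrent ioctls could overwrite each other's digipeater lists; the new signature makes the caller supply storage. The before/after shape, sketched with hypothetical types:

	/* Sketch: why the static buffer was a bug, and the reentrant fix. */
	#include <string.h>

	struct hypo_digi { int ndigi; int calls[8]; };

	/* old shape: NOT reentrant */
	static struct hypo_digi *to_digi_static(const int *calls, int n)
	{
		static struct hypo_digi d;	/* shared by all callers */
		d.ndigi = n;
		memcpy(d.calls, calls, n * sizeof(*calls));
		return &d;
	}

	/* new shape: caller supplies storage, typically on its own stack */
	static struct hypo_digi *to_digi(struct hypo_digi *d, const int *calls, int n)
	{
		d->ndigi = n;
		memcpy(d->calls, calls, n * sizeof(*calls));
		return d;
	}
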
@@ -49,7 +49,7 @@ struct atm_flow_data {
 	struct socket		*sock;		/* for closing */
 	u32			classid;	/* x:y type ID */
 	int			ref;		/* reference count */
-	struct gnet_stats_basic	bstats;
+	struct gnet_stats_basic_packed	bstats;
 	struct gnet_stats_queue	qstats;
 	struct atm_flow_data	*next;
 	struct atm_flow_data	*excess;	/* flow for excess traffic;
 
@@ -128,7 +128,7 @@ struct cbq_class
 	long			avgidle;
 	long			deficit;	/* Saved deficit for WRR */
 	psched_time_t		penalized;
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	struct tc_cbq_xstats	xstats;
 
@@ -22,7 +22,7 @@ struct drr_class {
 	unsigned int			refcnt;
 	unsigned int			filter_cnt;
 
-	struct gnet_stats_basic		bstats;
+	struct gnet_stats_basic_packed	bstats;
 	struct gnet_stats_queue		qstats;
 	struct gnet_stats_rate_est	rate_est;
 	struct list_head		alist;
 
@@ -116,7 +116,7 @@ struct hfsc_class
 	struct Qdisc_class_common cl_common;
 	unsigned int	refcnt;		/* usage count */
 
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	unsigned int	level;		/* class level in hierarchy */
 
@@ -74,7 +74,7 @@ enum htb_cmode {
 struct htb_class {
 	struct Qdisc_class_common common;
 	/* general class parameters */
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	struct tc_htb_xstats xstats;	/* our special stats */