Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
  net: Fix routing tables with id > 255 for legacy software
  sky2: Hold RTNL while calling dev_close()
  s2io iomem annotations
  atl1: fix suspend regression
  qeth: start dev queue after tx drop error
  qeth: Prepare-function to call s390dbf was wrong
  qeth: reduce number of kernel messages
  qeth: Use ccw_device_get_id().
  qeth: layer 3 Oops in ip event handler
  virtio: use callback on empty in virtio_net
  virtio: virtio_net free transmit skbs in a timer
  virtio: Fix typo in virtio_net_hdr comments
  virtio_net: Fix skb->csum_start computation
  ehea: set mac address fix
  sfc: Recover from RX queue flush failure
  add missing lance_* exports
  ixgbe: fix typo
  forcedeth: msi interrupts
  ipsec: pfkey should ignore events when no listeners
  pppoe: Unshare skb before anything else
  ...
commit f7f866eed0

47 changed files with 360 additions and 365 deletions
@@ -18,7 +18,6 @@
#include "midway.h"


#define KERNEL_OFFSET 0xC0000000 /* kernel 0x0 is at phys 0xC0000000 */
#define DEV_LABEL "eni"

#define UBR_BUFFER (128*1024) /* UBR buffer size */
@@ -806,7 +806,6 @@ static int DIVA_INIT_FUNCTION divas_init(void)
	if (!create_divas_proc()) {
#ifdef MODULE
		remove_divas_proc();
		divas_unregister_chrdev();
		divasfunc_exit();
#endif
@@ -125,8 +125,8 @@ static const struct file_operations divas_fops = {
int create_divas_proc(void)
{
	proc_create(divas_proc_name, S_IFREG | S_IRUGO, proc_net_eicon,
		    &divas_fops);
	divas_proc_entry = proc_create(divas_proc_name, S_IFREG | S_IRUGO,
				       proc_net_eicon, &divas_fops);
	if (!divas_proc_entry)
		return (0);
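The hunk above keeps the entry returned by proc_create() so a failed registration can be detected. A minimal sketch of that pattern (the entry and proc file names here are illustrative, not taken from the driver):

	static struct proc_dir_entry *example_entry;	/* illustrative name */

	static int example_create_proc(void)
	{
		/* Keep the returned entry: proc_create() yields NULL on failure. */
		example_entry = proc_create("example", S_IFREG | S_IRUGO,
					    NULL, &divas_fops);
		return example_entry ? 1 : 0;
	}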
@@ -207,30 +207,17 @@ hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t
/* read conf file -> output card info data */
/*******************************************/
static ssize_t
hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t * off)
hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
	char *cp;
	int i;

	if (file->f_mode & FMODE_READ) {
		if (!(cp = file->private_data))
			return (-EFAULT);	/* should never happen */
		i = strlen(cp);	/* get total string length */
		if (*off < i) {
			/* still bytes to transfer */
			cp += *off;	/* point to desired data offset */
			i -= *off;	/* remaining length */
			if (i > count)
				i = count;	/* limit length to transfer */
			if (copy_to_user(buf, cp, i))
				return (-EFAULT);	/* copy error */
			*off += i;	/* adjust offset */
		} else
			return (0);
	} else
		return (-EPERM);	/* no permission to read */
	if (!(file->f_mode & FMODE_READ))
		return -EPERM;	/* no permission to read */

	return (i);
	if (!(cp = file->private_data))
		return -EFAULT;	/* should never happen */

	return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
} /* hysdn_conf_read */

/******************/
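The new hysdn_conf_read() above leans on simple_read_from_buffer(), which clamps the length, copies to user space and advances *off in one call. A hedged sketch of a read handler built the same way (the function name is illustrative):

	#include <linux/fs.h>
	#include <linux/string.h>

	/* Sketch: expose a NUL-terminated string kept in file->private_data. */
	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *off)
	{
		const char *cp = file->private_data;

		if (!(file->f_mode & FMODE_READ))
			return -EPERM;
		if (!cp)
			return -EFAULT;
		/* Handles offset checks, length clamping and copy_to_user(). */
		return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
	}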
@ -506,6 +506,7 @@ int lance_open (struct net_device *dev)
|
|||
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lance_open);
|
||||
|
||||
int lance_close (struct net_device *dev)
|
||||
{
|
||||
|
@ -521,6 +522,7 @@ int lance_close (struct net_device *dev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lance_close);
|
||||
|
||||
void lance_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
|
@ -529,7 +531,7 @@ void lance_tx_timeout(struct net_device *dev)
|
|||
dev->trans_start = jiffies;
|
||||
netif_wake_queue (dev);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(lance_tx_timeout);
|
||||
|
||||
int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
|
@ -586,6 +588,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lance_start_xmit);
|
||||
|
||||
/* taken from the depca driver via a2065.c */
|
||||
static void lance_load_multicast (struct net_device *dev)
|
||||
|
@ -654,6 +657,7 @@ void lance_set_multicast (struct net_device *dev)
|
|||
if (!stopped)
|
||||
netif_start_queue (dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lance_set_multicast);
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
void lance_poll(struct net_device *dev)
|
||||
|
|
|
@ -636,22 +636,6 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
|
|||
return atl1_write_phy_reg(hw, 30, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Force the PHY into power saving mode using vendor magic.
|
||||
*/
|
||||
#ifdef CONFIG_PM
|
||||
static void atl1_phy_enter_power_saving(struct atl1_hw *hw)
|
||||
{
|
||||
atl1_write_phy_reg(hw, MII_DBG_ADDR, 0);
|
||||
atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E);
|
||||
atl1_write_phy_reg(hw, MII_DBG_ADDR, 2);
|
||||
atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
|
||||
atl1_write_phy_reg(hw, MII_DBG_ADDR, 3);
|
||||
atl1_write_phy_reg(hw, MII_DBG_DATA, 0);
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Resets the PHY and make all config validate
|
||||
* hw - Struct containing variables accessed by shared code
|
||||
|
@ -2860,7 +2844,6 @@ disable_wol:
|
|||
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
|
||||
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
|
||||
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
|
||||
atl1_phy_enter_power_saving(hw);
|
||||
hw->phy_configured = false;
|
||||
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
|
||||
exit:
|
||||
|
|
|
@@ -1766,16 +1766,20 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
	mutex_lock(&ehea_bcmc_regs.lock);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_upregs;
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_upregs;
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;
@@ -3273,6 +3273,20 @@ static void nv_link_irq(struct net_device *dev)
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}

static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;

@@ -3295,6 +3309,8 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
		if (!(events & np->irqmask))
			break;

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

@@ -3410,6 +3426,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
		if (!(events & np->irqmask))
			break;

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

@@ -3750,6 +3768,8 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);
@@ -329,6 +329,7 @@ config PXA_FICP
config MCS_FIR
	tristate "MosChip MCS7780 IrDA-USB dongle"
	depends on IRDA && USB && EXPERIMENTAL
	select CRC32
	help
	  Say Y or M here if you want to build support for the MosChip
	  MCS7780 IrDA-USB bridge device driver.
@@ -58,8 +58,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);

static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
	hw->mac.num_rx_queues = IXGBE_82598_MAX_TX_QUEUES;
	hw->mac.num_tx_queues = IXGBE_82598_MAX_RX_QUEUES;
	hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES;

	/* PHY ops are filled in by default properly for Fiber only */
@ -341,12 +341,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
|
|||
struct pppox_sock *relay_po;
|
||||
|
||||
if (sk->sk_state & PPPOX_BOUND) {
|
||||
struct pppoe_hdr *ph = pppoe_hdr(skb);
|
||||
int len = ntohs(ph->length);
|
||||
skb_pull_rcsum(skb, sizeof(struct pppoe_hdr));
|
||||
if (pskb_trim_rcsum(skb, len))
|
||||
goto abort_kfree;
|
||||
|
||||
ppp_input(&po->chan, skb);
|
||||
} else if (sk->sk_state & PPPOX_RELAY) {
|
||||
relay_po = get_item_by_addr(&po->pppoe_relay);
|
||||
|
@ -357,7 +351,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
|
|||
if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
|
||||
goto abort_put;
|
||||
|
||||
skb_pull(skb, sizeof(struct pppoe_hdr));
|
||||
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
|
||||
goto abort_put;
|
||||
} else {
|
||||
|
@@ -388,6 +381,7 @@ static int pppoe_rcv(struct sk_buff *skb,
{
	struct pppoe_hdr *ph;
	struct pppox_sock *po;
	int len;

	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
		goto out;

@@ -399,10 +393,21 @@ static int pppoe_rcv(struct sk_buff *skb,
		goto drop;

	ph = pppoe_hdr(skb);
	len = ntohs(ph->length);

	skb_pull_rcsum(skb, sizeof(*ph));
	if (skb->len < len)
		goto drop;

	po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
	if (po != NULL)
		return sk_receive_skb(sk_pppox(po), skb, 0);
	if (!po)
		goto drop;

	if (pskb_trim_rcsum(skb, len))
		goto drop;

	return sk_receive_skb(sk_pppox(po), skb, 0);

drop:
	kfree_skb(skb);
out:

@@ -427,12 +432,12 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
	if (dev_net(dev) != &init_net)
		goto abort;

	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
		goto abort;

	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
		goto abort;

	ph = pppoe_hdr(skb);
	if (ph->code != PADT_CODE)
		goto abort;
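The reordered pppoe_rcv() and pppoe_disc_rcv() above follow the rule behind the "pppoe: Unshare skb before anything else" commit: call skb_share_check() before reading, pulling or trimming the packet. A condensed sketch of that ordering (simplified, not the exact driver code):

	/* Sketch: receive-handler ordering for a possibly shared skb. */
	skb = skb_share_check(skb, GFP_ATOMIC);	/* clone it if someone else holds a reference */
	if (!skb)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
		goto drop;			/* header must be readable first */
	ph = pppoe_hdr(skb);
	len = ntohs(ph->length);
	skb_pull_rcsum(skb, sizeof(*ph));	/* only now start modifying the skb */
	if (skb->len < len || pskb_trim_rcsum(skb, len))
		goto drop;
	/* ... look up the session socket and hand the skb over ... */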
@ -937,12 +942,10 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|||
m->msg_namelen = 0;
|
||||
|
||||
if (skb) {
|
||||
struct pppoe_hdr *ph = pppoe_hdr(skb);
|
||||
const int len = ntohs(ph->length);
|
||||
|
||||
error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len);
|
||||
total_len = min(total_len, skb->len);
|
||||
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
|
||||
if (error == 0)
|
||||
error = len;
|
||||
error = total_len;
|
||||
}
|
||||
|
||||
kfree_skb(skb);
|
||||
|
|
|
@@ -783,14 +783,18 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
	err = 0;
	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &err);
	if (skb) {
		err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data,
				     skb->len);
		if (err < 0)
			goto do_skb_free;
		err = skb->len;
	}
do_skb_free:
	if (!skb)
		goto end;

	if (len > skb->len)
		len = skb->len;
	else if (len < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	if (likely(err == 0))
		err = len;

	kfree_skb(skb);
end:
	return err;
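With the change above, a receive into a too-small buffer appears to return the truncated length and set MSG_TRUNC rather than erroring out. A hedged user-space sketch of checking for that (the helper is illustrative):

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <stdio.h>

	/* Sketch: read one frame and report truncation. */
	static ssize_t read_frame(int fd, void *buf, size_t buflen)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = buflen };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
		ssize_t n = recvmsg(fd, &msg, 0);

		if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
			fprintf(stderr, "frame truncated to %zd bytes\n", n);
		return n;
	}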
@ -733,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
|
|||
continue;
|
||||
break;
|
||||
}
|
||||
if (rc)
|
||||
if (rc) {
|
||||
EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
|
||||
efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
|
||||
}
|
||||
|
||||
/* Remove RX descriptor ring from card */
|
||||
EFX_ZERO_OWORD(rx_desc_ptr);
|
||||
|
|
|
@@ -4404,7 +4404,9 @@ static int sky2_resume(struct pci_dev *pdev)
		if (err) {
			printk(KERN_ERR PFX "%s: could not up: %d\n",
			       dev->name, err);
			rtnl_lock();
			dev_close(dev);
			rtnl_unlock();
			goto out;
		}
	}
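The sky2 fix above wraps dev_close() in rtnl_lock()/rtnl_unlock(), since dev_close() expects the RTNL lock to be held. A minimal sketch of that requirement (the helper name is illustrative):

	#include <linux/rtnetlink.h>
	#include <linux/netdevice.h>

	/* Sketch: close a device from a context, such as a resume path,
	 * that does not already hold RTNL. */
	static void example_close_under_rtnl(struct net_device *dev)
	{
		rtnl_lock();
		dev_close(dev);
		rtnl_unlock();
	}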
@@ -64,8 +64,8 @@

#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.92"
#define DRV_MODULE_RELDATE "May 2, 2008"
#define DRV_MODULE_VERSION "3.92.1"
#define DRV_MODULE_RELDATE "June 9, 2008"

#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@ -1295,6 +1295,21 @@ static void tg3_frob_aux_power(struct tg3 *tp)
|
|||
GRC_LCLCTRL_GPIO_OUTPUT0 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT1),
|
||||
100);
|
||||
} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
|
||||
/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
|
||||
u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
|
||||
GRC_LCLCTRL_GPIO_OE1 |
|
||||
GRC_LCLCTRL_GPIO_OE2 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT0 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT1 |
|
||||
tp->grc_local_ctrl;
|
||||
tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
|
||||
|
||||
grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
|
||||
tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
|
||||
|
||||
grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
|
||||
tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
|
||||
} else {
|
||||
u32 no_gpio2;
|
||||
u32 grc_local_ctrl = 0;
|
||||
|
@ -3168,8 +3183,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
|
|||
err |= tg3_readphy(tp, MII_BMCR, &bmcr);
|
||||
|
||||
if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
|
||||
(tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
|
||||
tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
|
||||
(tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
|
||||
/* do nothing, just check for link up at the end */
|
||||
} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
|
||||
u32 adv, new_adv;
|
||||
|
@ -8599,7 +8613,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|||
(cmd->speed == SPEED_1000))
|
||||
return -EINVAL;
|
||||
else if ((cmd->speed == SPEED_1000) &&
|
||||
(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
|
||||
(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
|
||||
return -EINVAL;
|
||||
|
||||
tg3_full_lock(tp, 0);
|
||||
|
@ -11768,6 +11782,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
|
|||
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
|
||||
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
|
||||
|
||||
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
|
||||
/* Turn off the debug UART. */
|
||||
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
|
||||
if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
|
||||
/* Keep VMain power. */
|
||||
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT0;
|
||||
}
|
||||
|
||||
/* Force the chip into D0. */
|
||||
err = tg3_set_power_state(tp, PCI_D0);
|
||||
if (err) {
|
||||
|
|
|
@ -44,11 +44,15 @@ struct virtnet_info
|
|||
/* The skb we couldn't send because buffers were full. */
|
||||
struct sk_buff *last_xmit_skb;
|
||||
|
||||
/* If we need to free in a timer, this is it. */
|
||||
struct timer_list xmit_free_timer;
|
||||
|
||||
/* Number of input buffers, and max we've ever had. */
|
||||
unsigned int num, max;
|
||||
|
||||
/* For cleaning up after transmission. */
|
||||
struct tasklet_struct tasklet;
|
||||
bool free_in_tasklet;
|
||||
|
||||
/* Receive & send queues. */
|
||||
struct sk_buff_head recv;
|
||||
|
@ -72,7 +76,7 @@ static void skb_xmit_done(struct virtqueue *svq)
|
|||
/* Suppress further interrupts. */
|
||||
svq->vq_ops->disable_cb(svq);
|
||||
|
||||
/* We were waiting for more output buffers. */
|
||||
/* We were probably waiting for more output buffers. */
|
||||
netif_wake_queue(vi->dev);
|
||||
|
||||
/* Make sure we re-xmit last_xmit_skb: if there are no more packets
|
||||
|
@ -94,9 +98,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
|
|||
BUG_ON(len > MAX_PACKET_LEN);
|
||||
|
||||
skb_trim(skb, len);
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
|
||||
ntohs(skb->protocol), skb->len, skb->pkt_type);
|
||||
|
||||
dev->stats.rx_bytes += skb->len;
|
||||
dev->stats.rx_packets++;
|
||||
|
||||
|
@ -106,6 +108,10 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
|
|||
goto frame_err;
|
||||
}
|
||||
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
|
||||
ntohs(skb->protocol), skb->len, skb->pkt_type);
|
||||
|
||||
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
|
||||
pr_debug("GSO!\n");
|
||||
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
|
||||
|
@@ -238,9 +244,25 @@ static void free_old_xmit_skbs(struct virtnet_info *vi)
	}
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num;
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

@@ -283,7 +305,11 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}

static void xmit_tasklet(unsigned long data)
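Taken together with the probe, tasklet and remove hunks later in this patch, the timer is only a fallback for transports that lack VIRTIO_F_NOTIFY_ON_EMPTY. A condensed sketch of that arrangement, pulled together from this patch's hunks:

	/* Sketch, condensed from this patch: arm a 100 ms cleanup timer only
	 * when the host does not interrupt us once the TX ring drains. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	/* ... after queueing a packet in xmit_skb() ... */
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ / 10));

	/* ... and on device removal ... */
	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);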
@ -295,6 +321,8 @@ static void xmit_tasklet(unsigned long data)
|
|||
vi->svq->vq_ops->kick(vi->svq);
|
||||
vi->last_xmit_skb = NULL;
|
||||
}
|
||||
if (vi->free_in_tasklet)
|
||||
free_old_xmit_skbs(vi);
|
||||
netif_tx_unlock_bh(vi->dev);
|
||||
}
|
||||
|
||||
|
@ -435,6 +463,10 @@ static int virtnet_probe(struct virtio_device *vdev)
|
|||
vi->vdev = vdev;
|
||||
vdev->priv = vi;
|
||||
|
||||
/* If they give us a callback when all buffers are done, we don't need
|
||||
* the timer. */
|
||||
vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
|
||||
|
||||
/* We expect two virtqueues, receive then send. */
|
||||
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
|
||||
if (IS_ERR(vi->rvq)) {
|
||||
|
@ -454,6 +486,9 @@ static int virtnet_probe(struct virtio_device *vdev)
|
|||
|
||||
tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
|
||||
|
||||
if (!vi->free_in_tasklet)
|
||||
setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
|
||||
|
||||
err = register_netdev(dev);
|
||||
if (err) {
|
||||
pr_debug("virtio_net: registering device failed\n");
|
||||
|
@ -491,6 +526,9 @@ static void virtnet_remove(struct virtio_device *vdev)
|
|||
/* Stop all the virtqueues. */
|
||||
vdev->config->reset(vdev);
|
||||
|
||||
if (!vi->free_in_tasklet)
|
||||
del_timer_sync(&vi->xmit_free_timer);
|
||||
|
||||
/* Free our skbs in send and recv queues, if any. */
|
||||
while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
|
||||
kfree_skb(skb);
|
||||
|
@ -514,7 +552,7 @@ static struct virtio_device_id id_table[] = {
|
|||
static unsigned int features[] = {
|
||||
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
|
||||
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
|
||||
VIRTIO_NET_F_HOST_ECN,
|
||||
VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
|
||||
};
|
||||
|
||||
static struct virtio_driver virtio_net = {
|
||||
|
|
|
@ -1753,6 +1753,8 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
|
|||
|
||||
if (priv->workqueue) {
|
||||
cancel_delayed_work(&priv->request_scan);
|
||||
cancel_delayed_work(&priv->request_direct_scan);
|
||||
cancel_delayed_work(&priv->request_passive_scan);
|
||||
cancel_delayed_work(&priv->scan_event);
|
||||
}
|
||||
queue_work(priv->workqueue, &priv->down);
|
||||
|
@ -2005,6 +2007,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
|
|||
wake_up_interruptible(&priv->wait_command_queue);
|
||||
priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
|
||||
cancel_delayed_work(&priv->request_scan);
|
||||
cancel_delayed_work(&priv->request_direct_scan);
|
||||
cancel_delayed_work(&priv->request_passive_scan);
|
||||
cancel_delayed_work(&priv->scan_event);
|
||||
schedule_work(&priv->link_down);
|
||||
queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
|
||||
|
@ -4712,6 +4716,12 @@ static void ipw_rx_notification(struct ipw_priv *priv,
|
|||
priv->status &= ~STATUS_SCAN_FORCED;
|
||||
#endif /* CONFIG_IPW2200_MONITOR */
|
||||
|
||||
/* Do queued direct scans first */
|
||||
if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
|
||||
queue_delayed_work(priv->workqueue,
|
||||
&priv->request_direct_scan, 0);
|
||||
}
|
||||
|
||||
if (!(priv->status & (STATUS_ASSOCIATED |
|
||||
STATUS_ASSOCIATING |
|
||||
STATUS_ROAMING |
|
||||
|
@ -6267,7 +6277,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
|
|||
}
|
||||
}
|
||||
|
||||
static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
|
||||
static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
|
||||
{
|
||||
struct ipw_scan_request_ext scan;
|
||||
int err = 0, scan_type;
|
||||
|
@ -6278,22 +6288,31 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
|
|||
|
||||
mutex_lock(&priv->mutex);
|
||||
|
||||
if (direct && (priv->direct_scan_ssid_len == 0)) {
|
||||
IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
|
||||
priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (priv->status & STATUS_SCANNING) {
|
||||
IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
|
||||
priv->status |= STATUS_SCAN_PENDING;
|
||||
IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
|
||||
priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
|
||||
STATUS_SCAN_PENDING;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!(priv->status & STATUS_SCAN_FORCED) &&
|
||||
priv->status & STATUS_SCAN_ABORTING) {
|
||||
IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
|
||||
priv->status |= STATUS_SCAN_PENDING;
|
||||
priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
|
||||
STATUS_SCAN_PENDING;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (priv->status & STATUS_RF_KILL_MASK) {
|
||||
IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
|
||||
priv->status |= STATUS_SCAN_PENDING;
|
||||
IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
|
||||
priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
|
||||
STATUS_SCAN_PENDING;
|
||||
goto done;
|
||||
}
|
||||
|
||||
|
@ -6321,6 +6340,7 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
|
|||
cpu_to_le16(20);
|
||||
|
||||
scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
|
||||
scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
|
||||
|
||||
#ifdef CONFIG_IPW2200_MONITOR
|
||||
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
|
||||
|
@ -6360,13 +6380,23 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
|
|||
cpu_to_le16(2000);
|
||||
} else {
|
||||
#endif /* CONFIG_IPW2200_MONITOR */
|
||||
/* If we are roaming, then make this a directed scan for the
|
||||
* current network. Otherwise, ensure that every other scan
|
||||
* is a fast channel hop scan */
|
||||
if ((priv->status & STATUS_ROAMING)
|
||||
|| (!(priv->status & STATUS_ASSOCIATED)
|
||||
&& (priv->config & CFG_STATIC_ESSID)
|
||||
&& (le32_to_cpu(scan.full_scan_index) % 2))) {
|
||||
/* Honor direct scans first, otherwise if we are roaming make
|
||||
* this a direct scan for the current network. Finally,
|
||||
* ensure that every other scan is a fast channel hop scan */
|
||||
if (direct) {
|
||||
err = ipw_send_ssid(priv, priv->direct_scan_ssid,
|
||||
priv->direct_scan_ssid_len);
|
||||
if (err) {
|
||||
IPW_DEBUG_HC("Attempt to send SSID command "
|
||||
"failed\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
|
||||
} else if ((priv->status & STATUS_ROAMING)
|
||||
|| (!(priv->status & STATUS_ASSOCIATED)
|
||||
&& (priv->config & CFG_STATIC_ESSID)
|
||||
&& (le32_to_cpu(scan.full_scan_index) % 2))) {
|
||||
err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
|
||||
if (err) {
|
||||
IPW_DEBUG_HC("Attempt to send SSID command "
|
||||
|
@ -6391,7 +6421,12 @@ send_request:
|
|||
}
|
||||
|
||||
priv->status |= STATUS_SCANNING;
|
||||
priv->status &= ~STATUS_SCAN_PENDING;
|
||||
if (direct) {
|
||||
priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
|
||||
priv->direct_scan_ssid_len = 0;
|
||||
} else
|
||||
priv->status &= ~STATUS_SCAN_PENDING;
|
||||
|
||||
queue_delayed_work(priv->workqueue, &priv->scan_check,
|
||||
IPW_SCAN_CHECK_WATCHDOG);
|
||||
done:
|
||||
|
@ -6402,15 +6437,22 @@ done:
|
|||
static void ipw_request_passive_scan(struct work_struct *work)
|
||||
{
|
||||
struct ipw_priv *priv =
|
||||
container_of(work, struct ipw_priv, request_passive_scan);
|
||||
ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
|
||||
container_of(work, struct ipw_priv, request_passive_scan.work);
|
||||
ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
|
||||
}
|
||||
|
||||
static void ipw_request_scan(struct work_struct *work)
|
||||
{
|
||||
struct ipw_priv *priv =
|
||||
container_of(work, struct ipw_priv, request_scan.work);
|
||||
ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
|
||||
ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
|
||||
}
|
||||
|
||||
static void ipw_request_direct_scan(struct work_struct *work)
|
||||
{
|
||||
struct ipw_priv *priv =
|
||||
container_of(work, struct ipw_priv, request_direct_scan.work);
|
||||
ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
|
||||
}
|
||||
|
||||
static void ipw_bg_abort_scan(struct work_struct *work)
|
||||
|
@ -9477,99 +9519,38 @@ static int ipw_wx_get_retry(struct net_device *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
|
||||
int essid_len)
|
||||
{
|
||||
struct ipw_scan_request_ext scan;
|
||||
int err = 0, scan_type;
|
||||
|
||||
if (!(priv->status & STATUS_INIT) ||
|
||||
(priv->status & STATUS_EXIT_PENDING))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
|
||||
if (priv->status & STATUS_RF_KILL_MASK) {
|
||||
IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
|
||||
priv->status |= STATUS_SCAN_PENDING;
|
||||
goto done;
|
||||
}
|
||||
|
||||
IPW_DEBUG_HC("starting request direct scan!\n");
|
||||
|
||||
if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
|
||||
/* We should not sleep here; otherwise we will block most
|
||||
* of the system (for instance, we hold rtnl_lock when we
|
||||
* get here).
|
||||
*/
|
||||
err = -EAGAIN;
|
||||
goto done;
|
||||
}
|
||||
memset(&scan, 0, sizeof(scan));
|
||||
|
||||
if (priv->config & CFG_SPEED_SCAN)
|
||||
scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
|
||||
cpu_to_le16(30);
|
||||
else
|
||||
scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
|
||||
cpu_to_le16(20);
|
||||
|
||||
scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
|
||||
cpu_to_le16(20);
|
||||
scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
|
||||
scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
|
||||
|
||||
scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
|
||||
|
||||
err = ipw_send_ssid(priv, essid, essid_len);
|
||||
if (err) {
|
||||
IPW_DEBUG_HC("Attempt to send SSID command failed\n");
|
||||
goto done;
|
||||
}
|
||||
scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
|
||||
|
||||
ipw_add_scan_channels(priv, &scan, scan_type);
|
||||
|
||||
err = ipw_send_scan_request_ext(priv, &scan);
|
||||
if (err) {
|
||||
IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
|
||||
goto done;
|
||||
}
|
||||
|
||||
priv->status |= STATUS_SCANNING;
|
||||
|
||||
done:
|
||||
mutex_unlock(&priv->mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ipw_wx_set_scan(struct net_device *dev,
|
||||
struct iw_request_info *info,
|
||||
union iwreq_data *wrqu, char *extra)
|
||||
{
|
||||
struct ipw_priv *priv = ieee80211_priv(dev);
|
||||
struct iw_scan_req *req = (struct iw_scan_req *)extra;
|
||||
struct delayed_work *work = NULL;
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
|
||||
priv->user_requested_scan = 1;
|
||||
mutex_unlock(&priv->mutex);
|
||||
|
||||
if (wrqu->data.length == sizeof(struct iw_scan_req)) {
|
||||
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
|
||||
ipw_request_direct_scan(priv, req->essid,
|
||||
req->essid_len);
|
||||
return 0;
|
||||
}
|
||||
if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
|
||||
queue_work(priv->workqueue,
|
||||
&priv->request_passive_scan);
|
||||
return 0;
|
||||
int len = min((int)req->essid_len,
|
||||
(int)sizeof(priv->direct_scan_ssid));
|
||||
memcpy(priv->direct_scan_ssid, req->essid, len);
|
||||
priv->direct_scan_ssid_len = len;
|
||||
work = &priv->request_direct_scan;
|
||||
} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
|
||||
work = &priv->request_passive_scan;
|
||||
}
|
||||
} else {
|
||||
/* Normal active broadcast scan */
|
||||
work = &priv->request_scan;
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
|
||||
IPW_DEBUG_WX("Start scan\n");
|
||||
|
||||
queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
|
||||
queue_delayed_work(priv->workqueue, work, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -10731,6 +10712,8 @@ static void ipw_link_up(struct ipw_priv *priv)
|
|||
}
|
||||
|
||||
cancel_delayed_work(&priv->request_scan);
|
||||
cancel_delayed_work(&priv->request_direct_scan);
|
||||
cancel_delayed_work(&priv->request_passive_scan);
|
||||
cancel_delayed_work(&priv->scan_event);
|
||||
ipw_reset_stats(priv);
|
||||
/* Ensure the rate is updated immediately */
|
||||
|
@ -10761,6 +10744,8 @@ static void ipw_link_down(struct ipw_priv *priv)
|
|||
|
||||
/* Cancel any queued work ... */
|
||||
cancel_delayed_work(&priv->request_scan);
|
||||
cancel_delayed_work(&priv->request_direct_scan);
|
||||
cancel_delayed_work(&priv->request_passive_scan);
|
||||
cancel_delayed_work(&priv->adhoc_check);
|
||||
cancel_delayed_work(&priv->gather_stats);
|
||||
|
||||
|
@ -10800,8 +10785,9 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
|
|||
INIT_WORK(&priv->up, ipw_bg_up);
|
||||
INIT_WORK(&priv->down, ipw_bg_down);
|
||||
INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
|
||||
INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
|
||||
INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
|
||||
INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
|
||||
INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
|
||||
INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
|
||||
INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
|
||||
INIT_WORK(&priv->roam, ipw_bg_roam);
|
||||
|
@ -11835,6 +11821,8 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
|
|||
cancel_delayed_work(&priv->adhoc_check);
|
||||
cancel_delayed_work(&priv->gather_stats);
|
||||
cancel_delayed_work(&priv->request_scan);
|
||||
cancel_delayed_work(&priv->request_direct_scan);
|
||||
cancel_delayed_work(&priv->request_passive_scan);
|
||||
cancel_delayed_work(&priv->scan_event);
|
||||
cancel_delayed_work(&priv->rf_kill);
|
||||
cancel_delayed_work(&priv->scan_check);
|
||||
|
|
|
@ -1037,6 +1037,7 @@ struct ipw_cmd { /* XXX */
|
|||
#define STATUS_DISASSOC_PENDING (1<<12)
|
||||
#define STATUS_STATE_PENDING (1<<13)
|
||||
|
||||
#define STATUS_DIRECT_SCAN_PENDING (1<<19)
|
||||
#define STATUS_SCAN_PENDING (1<<20)
|
||||
#define STATUS_SCANNING (1<<21)
|
||||
#define STATUS_SCAN_ABORTING (1<<22)
|
||||
|
@ -1292,6 +1293,8 @@ struct ipw_priv {
|
|||
struct iw_public_data wireless_data;
|
||||
|
||||
int user_requested_scan;
|
||||
u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
|
||||
u8 direct_scan_ssid_len;
|
||||
|
||||
struct workqueue_struct *workqueue;
|
||||
|
||||
|
@ -1301,8 +1304,9 @@ struct ipw_priv {
|
|||
struct work_struct system_config;
|
||||
struct work_struct rx_replenish;
|
||||
struct delayed_work request_scan;
|
||||
struct delayed_work request_direct_scan;
|
||||
struct delayed_work request_passive_scan;
|
||||
struct delayed_work scan_event;
|
||||
struct work_struct request_passive_scan;
|
||||
struct work_struct adapter_restart;
|
||||
struct delayed_work rf_kill;
|
||||
struct work_struct up;
|
||||
|
|
|
@ -229,14 +229,15 @@ static int iwl3945_led_register_led(struct iwl3945_priv *priv,
|
|||
led->led_dev.brightness_set = iwl3945_led_brightness_set;
|
||||
led->led_dev.default_trigger = trigger;
|
||||
|
||||
led->priv = priv;
|
||||
led->type = type;
|
||||
|
||||
ret = led_classdev_register(device, &led->led_dev);
|
||||
if (ret) {
|
||||
IWL_ERROR("Error: failed to register led handler.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
led->priv = priv;
|
||||
led->type = type;
|
||||
led->registered = 1;
|
||||
|
||||
if (set_led && led->led_on)
|
||||
|
|
|
@ -1842,6 +1842,9 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
|
|||
|
||||
spin_lock_irqsave(&priv->driver_lock, flags);
|
||||
|
||||
/* We don't get a response on the sleep-confirmation */
|
||||
priv->dnld_sent = DNLD_RES_RECEIVED;
|
||||
|
||||
/* If nothing to do, go back to sleep (?) */
|
||||
if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx])
|
||||
priv->psstate = PS_STATE_SLEEP;
|
||||
|
@ -1904,12 +1907,12 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
|
|||
|
||||
lbs_deb_enter(LBS_DEB_HOST);
|
||||
|
||||
spin_lock_irqsave(&priv->driver_lock, flags);
|
||||
if (priv->dnld_sent) {
|
||||
allowed = 0;
|
||||
lbs_deb_host("dnld_sent was set\n");
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->driver_lock, flags);
|
||||
/* In-progress command? */
|
||||
if (priv->cur_cmd) {
|
||||
allowed = 0;
|
||||
|
|
|
@ -732,8 +732,8 @@ static int lbs_thread(void *data)
|
|||
lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
|
||||
priv->currenttxskb, priv->dnld_sent);
|
||||
|
||||
spin_lock_irq(&priv->driver_lock);
|
||||
/* Process any pending command response */
|
||||
spin_lock_irq(&priv->driver_lock);
|
||||
resp_idx = priv->resp_idx;
|
||||
if (priv->resp_len[resp_idx]) {
|
||||
spin_unlock_irq(&priv->driver_lock);
|
||||
|
|
|
@ -719,7 +719,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
|
|||
fc = le16_to_cpu(*((__le16 *) buffer));
|
||||
|
||||
is_qos = ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
|
||||
((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA);
|
||||
(fc & IEEE80211_STYPE_QOS_DATA);
|
||||
is_4addr = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
|
||||
(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
|
||||
need_padding = is_qos ^ is_4addr;
|
||||
|
|
|
@ -290,9 +290,6 @@ int qeth_set_large_send(struct qeth_card *card,
|
|||
card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
|
||||
NETIF_F_HW_CSUM;
|
||||
} else {
|
||||
PRINT_WARN("TSO not supported on %s. "
|
||||
"large_send set to 'no'.\n",
|
||||
card->dev->name);
|
||||
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
|
||||
NETIF_F_HW_CSUM);
|
||||
card->options.large_send = QETH_LARGE_SEND_NO;
|
||||
|
@ -1407,12 +1404,6 @@ static void qeth_init_func_level(struct qeth_card *card)
|
|||
}
|
||||
}
|
||||
|
||||
static inline __u16 qeth_raw_devno_from_bus_id(char *id)
|
||||
{
|
||||
id += (strlen(id) - 4);
|
||||
return (__u16) simple_strtoul(id, &id, 16);
|
||||
}
|
||||
|
||||
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
|
||||
void (*idx_reply_cb)(struct qeth_channel *,
|
||||
struct qeth_cmd_buffer *))
|
||||
|
@ -1439,7 +1430,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
|
|||
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
|
||||
|
||||
if (rc) {
|
||||
PRINT_ERR("Error2 in activating channel rc=%d\n", rc);
|
||||
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
|
||||
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
|
||||
atomic_set(&channel->irq_pending, 0);
|
||||
wake_up(&card->wait_q);
|
||||
|
@ -1468,6 +1459,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
|
|||
__u16 temp;
|
||||
__u8 tmp;
|
||||
int rc;
|
||||
struct ccw_dev_id temp_devid;
|
||||
|
||||
card = CARD_FROM_CDEV(channel->ccwdev);
|
||||
|
||||
|
@ -1494,8 +1486,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
|
|||
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
|
||||
memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
|
||||
&card->info.func_level, sizeof(__u16));
|
||||
temp = qeth_raw_devno_from_bus_id(CARD_DDEV_ID(card));
|
||||
memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
|
||||
ccw_device_get_id(CARD_DDEV(card), &temp_devid);
|
||||
memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
|
||||
temp = (card->info.cula << 8) + card->info.unit_addr2;
|
||||
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
|
||||
|
||||
|
@ -1508,7 +1500,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
|
|||
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
|
||||
|
||||
if (rc) {
|
||||
PRINT_ERR("Error1 in activating channel. rc=%d\n", rc);
|
||||
QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
|
||||
rc);
|
||||
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
|
||||
atomic_set(&channel->irq_pending, 0);
|
||||
wake_up(&card->wait_q);
|
||||
|
@ -1658,7 +1651,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
|
|||
|
||||
reply = qeth_alloc_reply(card);
|
||||
if (!reply) {
|
||||
PRINT_WARN("Could not alloc qeth_reply!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
reply->callback = reply_cb;
|
||||
|
@ -2612,15 +2604,9 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
|
|||
if (newcount < count) {
|
||||
/* we are in memory shortage so we switch back to
|
||||
traditional skb allocation and drop packages */
|
||||
if (!atomic_read(&card->force_alloc_skb) &&
|
||||
net_ratelimit())
|
||||
PRINT_WARN("Switch to alloc skb\n");
|
||||
atomic_set(&card->force_alloc_skb, 3);
|
||||
count = newcount;
|
||||
} else {
|
||||
if ((atomic_read(&card->force_alloc_skb) == 1) &&
|
||||
net_ratelimit())
|
||||
PRINT_WARN("Switch to sg\n");
|
||||
atomic_add_unless(&card->force_alloc_skb, -1, 0);
|
||||
}
|
||||
|
||||
|
@ -3034,7 +3020,7 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
|
|||
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
|
||||
+ skb->len) >> PAGE_SHIFT);
|
||||
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
|
||||
PRINT_ERR("Invalid size of IP packet "
|
||||
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
|
||||
"(Number=%d / Length=%d). Discarded.\n",
|
||||
(elements_needed+elems), skb->len);
|
||||
return 0;
|
||||
|
@ -3247,8 +3233,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
|
|||
* free buffers) to handle eddp context */
|
||||
if (qeth_eddp_check_buffers_for_context(queue, ctx)
|
||||
< 0) {
|
||||
if (net_ratelimit())
|
||||
PRINT_WARN("eddp tx_dropped 1\n");
|
||||
rc = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
@ -3260,7 +3244,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
|
|||
tmp = qeth_eddp_fill_buffer(queue, ctx,
|
||||
queue->next_buf_to_fill);
|
||||
if (tmp < 0) {
|
||||
PRINT_ERR("eddp tx_dropped 2\n");
|
||||
rc = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
@ -3602,8 +3585,6 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
|
|||
|
||||
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
|
||||
(!card->options.layer2)) {
|
||||
PRINT_WARN("SNMP Query MIBS not supported "
|
||||
"on %s!\n", QETH_CARD_IFNAME(card));
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
/* skip 4 bytes (data_len struct member) to get req_len */
|
||||
|
@ -3634,7 +3615,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
|
|||
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
|
||||
qeth_snmp_command_cb, (void *)&qinfo);
|
||||
if (rc)
|
||||
PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
|
||||
QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
|
||||
QETH_CARD_IFNAME(card), rc);
|
||||
else {
|
||||
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
|
||||
|
@ -3807,8 +3788,8 @@ retry:
|
|||
if (mpno)
|
||||
mpno = min(mpno - 1, QETH_MAX_PORTNO);
|
||||
if (card->info.portno > mpno) {
|
||||
PRINT_ERR("Device %s does not offer port number %d \n.",
|
||||
CARD_BUS_ID(card), card->info.portno);
|
||||
QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
|
||||
"\n.", CARD_BUS_ID(card), card->info.portno);
|
||||
rc = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
@ -3985,8 +3966,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
|
|||
return skb;
|
||||
no_mem:
|
||||
if (net_ratelimit()) {
|
||||
PRINT_WARN("No memory for packet received on %s.\n",
|
||||
QETH_CARD_IFNAME(card));
|
||||
QETH_DBF_TEXT(TRACE, 2, "noskbmem");
|
||||
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
|
||||
}
|
||||
|
@@ -4004,15 +3983,17 @@ static void qeth_unregister_dbf_views(void)
	}
}

void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...)
void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (level > (qeth_dbf[dbf_nix].id)->level)
		return;
	snprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text);
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);

}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
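This is the "qeth: Prepare-function to call s390dbf was wrong" fix: the old code passed the caller's string straight to snprintf() as a format and silently dropped the variadic arguments. A minimal sketch of the corrected varargs pattern (generic names, not the qeth symbols):

	#include <stdarg.h>

	/* Sketch: format into a bounded buffer using the caller's arguments. */
	static void example_log(char *fmt, ...)
	{
		char buf[32];
		va_list args;

		va_start(args, fmt);
		vsnprintf(buf, sizeof(buf), fmt, args);	/* consumes the varargs */
		va_end(args);
		/* hand 'buf' to the debug facility here */
	}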
@ -122,8 +122,8 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
|
|||
if (element == 0)
|
||||
return -EBUSY;
|
||||
else {
|
||||
PRINT_WARN("could only partially fill eddp "
|
||||
"buffer!\n");
|
||||
QETH_DBF_MESSAGE(2, "could only partially fill"
|
||||
"eddp buffer!\n");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -143,8 +143,6 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
|
|||
if (must_refcnt) {
|
||||
must_refcnt = 0;
|
||||
if (qeth_eddp_buf_ref_context(buf, ctx)) {
|
||||
PRINT_WARN("no memory to create eddp context "
|
||||
"reference\n");
|
||||
goto out_check;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -129,7 +129,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
|
|||
|
||||
portno = simple_strtoul(buf, &tmp, 16);
|
||||
if (portno > QETH_MAX_PORTNO) {
|
||||
PRINT_WARN("portno 0x%X is out of range\n", portno);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -223,8 +222,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
|
|||
* if though we have to permit priority queueing
|
||||
*/
|
||||
if (card->qdio.no_out_queues == 1) {
|
||||
PRINT_WARN("Priority queueing disabled due "
|
||||
"to hardware limitations!\n");
|
||||
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
|
||||
return -EPERM;
|
||||
}
|
||||
|
@ -250,7 +247,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
|
|||
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
|
||||
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
|
||||
} else {
|
||||
PRINT_WARN("Unknown queueing type '%s'\n", tmp);
|
||||
return -EINVAL;
|
||||
}
|
||||
return count;
|
||||
|
@ -291,9 +287,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
|
|||
((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
|
||||
if (old_cnt != cnt) {
|
||||
rc = qeth_realloc_buffer_pool(card, cnt);
|
||||
if (rc)
|
||||
PRINT_WARN("Error (%d) while setting "
|
||||
"buffer count.\n", rc);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
@ -355,7 +348,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
|
|||
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
|
||||
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
|
||||
} else {
|
||||
PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return count;
|
||||
|
@ -399,7 +391,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
|
|||
newdis = QETH_DISCIPLINE_LAYER2;
|
||||
break;
|
||||
default:
|
||||
PRINT_WARN("layer2: write 0 or 1 to this file!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -463,7 +454,6 @@ static ssize_t qeth_dev_large_send_store(struct device *dev,
|
|||
} else if (!strcmp(tmp, "TSO")) {
|
||||
type = QETH_LARGE_SEND_TSO;
|
||||
} else {
|
||||
PRINT_WARN("large_send: invalid mode %s!\n", tmp);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (card->options.large_send == type)
|
||||
|
@ -503,8 +493,6 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
|
|||
if (i <= max_value) {
|
||||
*value = i;
|
||||
} else {
|
||||
PRINT_WARN("blkt total time: write values between"
|
||||
" 0 and %d to this file!\n", max_value);
|
||||
return -EINVAL;
|
||||
}
|
||||
return count;
|
||||
|
|
|
@@ -101,19 +101,16 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
{
	struct qeth_card *card;
	struct net_device *ndev;
	unsigned char *readno;
	__u16 temp_dev_no, card_dev_no;
	char *endp;
	__u16 temp_dev_no;
	unsigned long flags;
	struct ccw_dev_id read_devid;

	ndev = NULL;
	memcpy(&temp_dev_no, read_dev_no, 2);
	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		readno = CARD_RDEV_ID(card);
		readno += (strlen(readno) - 4);
		card_dev_no = simple_strtoul(readno, &endp, 16);
		if (card_dev_no == temp_dev_no) {
		ccw_device_get_id(CARD_RDEV(card), &read_devid);
		if (read_devid.devno == temp_dev_no) {
			ndev = card->dev;
			break;
		}
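The "qeth: Use ccw_device_get_id()" change above replaces parsing the trailing hex digits of the bus ID string with the dedicated accessor. A hedged sketch of that lookup (variable and function names are illustrative):

	/* Sketch: compare a CCW device's number without parsing its bus ID. */
	static bool example_matches(struct ccw_device *cdev, __u16 wanted_devno)
	{
		struct ccw_dev_id id;

		ccw_device_get_id(cdev, &id);	/* fills in ssid and devno */
		return id.devno == wanted_devno;
	}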
@ -134,14 +131,14 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
|
|||
mac = &cmd->data.setdelmac.mac[0];
|
||||
/* MAC already registered, needed in couple/uncouple case */
|
||||
if (cmd->hdr.return_code == 0x2005) {
|
||||
PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
|
||||
QETH_DBF_MESSAGE(2, "Group MAC %02x:%02x:%02x:%02x:%02x:%02x "
|
||||
"already existing on %s \n",
|
||||
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
|
||||
QETH_CARD_IFNAME(card));
|
||||
cmd->hdr.return_code = 0;
|
||||
}
|
||||
if (cmd->hdr.return_code)
|
||||
PRINT_ERR("Could not set group MAC " \
|
||||
QETH_DBF_MESSAGE(2, "Could not set group MAC "
|
||||
"%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
|
||||
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
|
||||
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
|
||||
|
@ -166,7 +163,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
|
|||
cmd = (struct qeth_ipa_cmd *) data;
|
||||
mac = &cmd->data.setdelmac.mac[0];
|
||||
if (cmd->hdr.return_code)
|
||||
PRINT_ERR("Could not delete group MAC " \
|
||||
QETH_DBF_MESSAGE(2, "Could not delete group MAC "
|
||||
"%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
|
||||
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
|
||||
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
|
||||
|
@ -186,10 +183,8 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
|
|||
|
||||
mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
|
||||
|
||||
if (!mc) {
|
||||
PRINT_ERR("no mem vor mc mac address\n");
|
||||
if (!mc)
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
|
||||
mc->mc_addrlen = OSA_ADDR_LEN;
|
||||
|
@ -280,7 +275,7 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
|
|||
QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
|
||||
cmd = (struct qeth_ipa_cmd *) data;
|
||||
if (cmd->hdr.return_code) {
|
||||
PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
|
||||
QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
|
||||
"Continuing\n", cmd->data.setdelvlan.vlan_id,
|
||||
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
|
||||
QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
|
||||
|
@ -333,8 +328,6 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
spin_lock_bh(&card->vlanlock);
|
||||
list_add_tail(&id->list, &card->vid_list);
|
||||
spin_unlock_bh(&card->vlanlock);
|
||||
} else {
|
||||
PRINT_ERR("no memory for vid\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -550,16 +543,15 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
|
|||
|
||||
rc = qeth_query_setadapterparms(card);
|
||||
if (rc) {
|
||||
PRINT_WARN("could not query adapter parameters on device %s: "
|
||||
"x%x\n", CARD_BUS_ID(card), rc);
|
||||
QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
|
||||
"device %s: x%x\n", CARD_BUS_ID(card), rc);
|
||||
}
|
||||
|
||||
if (card->info.guestlan) {
|
||||
rc = qeth_setadpparms_change_macaddr(card);
|
||||
if (rc) {
|
||||
PRINT_WARN("couldn't get MAC address on "
|
||||
"device %s: x%x\n",
|
||||
CARD_BUS_ID(card), rc);
|
||||
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
|
||||
"device %s: x%x\n", CARD_BUS_ID(card), rc);
|
||||
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
|
||||
return rc;
|
||||
}
|
||||
|
@ -585,8 +577,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
|
|||
}
|
||||
|
||||
if (card->info.type == QETH_CARD_TYPE_OSN) {
|
||||
PRINT_WARN("Setting MAC address on %s is not supported.\n",
|
||||
dev->name);
|
||||
QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -666,7 +656,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
ctx = qeth_eddp_create_context(card, new_skb, hdr,
|
||||
skb->sk->sk_protocol);
|
||||
if (ctx == NULL) {
|
||||
PRINT_WARN("could not create eddp context\n");
|
||||
QETH_DBF_MESSAGE(2, "could not create eddp context\n");
|
||||
goto tx_drop;
|
||||
}
|
||||
} else {
|
||||
|
@ -731,6 +721,7 @@ tx_drop:
|
|||
if ((new_skb != skb) && new_skb)
|
||||
dev_kfree_skb_any(new_skb);
|
||||
dev_kfree_skb_any(skb);
|
||||
netif_wake_queue(dev);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
|
@ -1155,7 +1146,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
|
|||
(addr_t) iob, 0, 0);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
|
||||
if (rc) {
|
||||
PRINT_WARN("qeth_osn_send_control_data: "
|
||||
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
|
||||
"ccw_device_start rc = %i\n", rc);
|
||||
QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
|
||||
qeth_release_buffer(iob->channel, iob);
|
||||
|
|
|
@ -311,7 +311,6 @@ static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
|
|||
|
||||
addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
|
||||
if (addr == NULL) {
|
||||
PRINT_WARN("Not enough memory to add address\n");
|
||||
return NULL;
|
||||
}
|
||||
addr->type = QETH_IP_TYPE_NORMAL;
|
||||
|
@ -649,15 +648,6 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
|
|||
}
|
||||
}
|
||||
out_inval:
|
||||
PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
|
||||
"Router status set to 'no router'.\n",
|
||||
((*type == PRIMARY_ROUTER)? "primary router" :
|
||||
(*type == SECONDARY_ROUTER)? "secondary router" :
|
||||
(*type == PRIMARY_CONNECTOR)? "primary connector" :
|
||||
(*type == SECONDARY_CONNECTOR)? "secondary connector" :
|
||||
(*type == MULTICAST_ROUTER)? "multicast router" :
|
||||
"unknown"),
|
||||
card->dev->name);
|
||||
*type = NO_ROUTER;
|
||||
}
|
||||
|
||||
|
@ -674,9 +664,9 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
QETH_PROT_IPV4);
if (rc) {
card->options.route4.type = NO_ROUTER;
PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
"Type set to 'no router'.\n",
rc, QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
" on %s. Type set to 'no router'.\n", rc,
QETH_CARD_IFNAME(card));
}
return rc;
}

@ -697,9 +687,9 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
QETH_PROT_IPV6);
if (rc) {
card->options.route6.type = NO_ROUTER;
PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
"Type set to 'no router'.\n",
rc, QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
" on %s. Type set to 'no router'.\n", rc,
QETH_CARD_IFNAME(card));
}
#endif
return rc;

@ -737,7 +727,6 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
if (!memcmp(ipatoe->addr, new->addr,
(ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
(ipatoe->mask_bits == new->mask_bits)) {
PRINT_WARN("ipato entry already exists!\n");
rc = -EEXIST;
break;
}

@ -802,7 +791,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
rc = -EEXIST;
spin_unlock_irqrestore(&card->ip_lock, flags);
if (rc) {
PRINT_WARN("Cannot add VIPA. Address already exists!\n");
return rc;
}
if (!qeth_l3_add_ip(card, ipaddr))

@ -867,7 +855,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
rc = -EEXIST;
spin_unlock_irqrestore(&card->ip_lock, flags);
if (rc) {
PRINT_WARN("Cannot add RXIP. Address already exists!\n");
return rc;
}
if (!qeth_l3_add_ip(card, ipaddr))

@ -1020,23 +1007,23 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card)
IPA_SETADP_SET_BROADCAST_MODE,
card->options.broadcast_mode);
if (rc)
PRINT_WARN("couldn't set broadcast mode on "
QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
"device %s: x%x\n",
CARD_BUS_ID(card), rc);
rc = qeth_l3_send_setadp_mode(card,
IPA_SETADP_ALTER_MAC_ADDRESS,
card->options.macaddr_mode);
if (rc)
PRINT_WARN("couldn't set macaddr mode on "
QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
"device %s: x%x\n", CARD_BUS_ID(card), rc);
return rc;
}
if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
PRINT_WARN("set adapter parameters not available "
QETH_DBF_MESSAGE(2, "set adapter parameters not available "
"to set broadcast mode, using ALLRINGS "
"on device %s:\n", CARD_BUS_ID(card));
if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
PRINT_WARN("set adapter parameters not available "
QETH_DBF_MESSAGE(2, "set adapter parameters not available "
"to set macaddr mode, using NONCANONICAL "
"on device %s:\n", CARD_BUS_ID(card));
return 0;

@ -2070,7 +2057,7 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
card = netdev_priv(dev);
else if (rc == QETH_VLAN_CARD)
card = netdev_priv(vlan_dev_info(dev)->real_dev);
if (card->options.layer2)
if (card && card->options.layer2)
card = NULL;
QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
return card ;

@ -2182,8 +2169,6 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
if (card->info.guestlan)
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
PRINT_WARN("ARP processing not supported "
"on %s!\n", QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,

@ -2191,8 +2176,8 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
no_entries);
if (rc) {
tmp = rc;
PRINT_WARN("Could not set number of ARP entries on %s: "
"%s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
"%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;

@ -2260,9 +2245,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
qdata->no_entries * uentry_size){
QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
PRINT_WARN("query ARP user space buffer is too small for "
"the returned number of ARP entries. "
"Aborting query!\n");
goto out_error;
}
QETH_DBF_TEXT_(TRACE, 4, "anore%i",

@ -2324,8 +2306,6 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)

if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
IPA_ARP_PROCESSING)) {
PRINT_WARN("ARP processing not supported "
"on %s!\n", QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
/* get size of userspace buffer and mask_bits -> 6 bytes */

@ -2344,7 +2324,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
qeth_l3_arp_query_cb, (void *)&qinfo);
if (rc) {
tmp = rc;
PRINT_WARN("Error while querying ARP cache on %s: %s "
QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s "
"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
if (copy_to_user(udata, qinfo.udata, 4))

@ -2375,8 +2355,6 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
if (card->info.guestlan)
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
PRINT_WARN("ARP processing not supported "
"on %s!\n", QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}

@ -2391,10 +2369,9 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
if (rc) {
tmp = rc;
qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
PRINT_WARN("Could not add ARP entry for address %s on %s: "
"%s (0x%x/%d)\n",
buf, QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
"on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
}

@ -2417,8 +2394,6 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
if (card->info.guestlan)
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
PRINT_WARN("ARP processing not supported "
"on %s!\n", QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
memcpy(buf, entry, 12);

@ -2433,10 +2408,9 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
tmp = rc;
memset(buf, 0, 16);
qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
PRINT_WARN("Could not delete ARP entry for address %s on %s: "
"%s (0x%x/%d)\n",
buf, QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
" on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
}

@ -2456,16 +2430,14 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
PRINT_WARN("ARP processing not supported "
"on %s!\n", QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
if (rc) {
tmp = rc;
PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
QETH_CARD_IFNAME(card),
QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;

@ -2724,7 +2696,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
ctx = qeth_eddp_create_context(card, new_skb, hdr,
skb->sk->sk_protocol);
if (ctx == NULL) {
PRINT_WARN("could not create eddp context\n");
QETH_DBF_MESSAGE(2, "could not create eddp context\n");
goto tx_drop;
}
} else {

@ -2792,6 +2764,7 @@ tx_drop:
if ((new_skb != skb) && new_skb)
dev_kfree_skb_any(new_skb);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}

@ -85,7 +85,6 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
} else if (!strcmp(tmp, "multicast_router")) {
route->type = MULTICAST_ROUTER;
} else {
PRINT_WARN("Invalid routing type '%s'.\n", tmp);
return -EINVAL;
}
if (((card->state == CARD_STATE_SOFTSETUP) ||

@ -137,9 +136,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
return -EINVAL;

if (!qeth_is_supported(card, IPA_IPV6)) {
PRINT_WARN("IPv6 not supported for interface %s.\n"
"Routing status no changed.\n",
QETH_CARD_IFNAME(card));
return -ENOTSUPP;
}

@ -179,7 +175,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
if ((i == 0) || (i == 1))
card->options.fake_broadcast = i;
else {
PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
return -EINVAL;
}
return count;

@ -220,7 +215,6 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,

if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
PRINT_WARN("Device is not a tokenring device!\n");
return -EINVAL;
}

@ -233,8 +227,6 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
return count;
} else {
PRINT_WARN("broadcast_mode: invalid mode %s!\n",
tmp);
return -EINVAL;
}
return count;

@ -275,7 +267,6 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,

if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
PRINT_WARN("Device is not a tokenring device!\n");
return -EINVAL;
}

@ -285,7 +276,6 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
QETH_TR_MACADDR_CANONICAL :
QETH_TR_MACADDR_NONCANONICAL;
else {
PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
return -EINVAL;
}
return count;

@ -327,7 +317,6 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
else if (!strcmp(tmp, "no_checksumming"))
card->options.checksum_type = NO_CHECKSUMMING;
else {
PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
return -EINVAL;
}
return count;

@ -382,8 +371,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
} else if (!strcmp(tmp, "0")) {
card->ipato.enabled = 0;
} else {
PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
"this file\n");
return -EINVAL;
}
return count;

@ -422,8 +409,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
} else if (!strcmp(tmp, "0")) {
card->ipato.invert4 = 0;
} else {
PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
"this file\n");
return -EINVAL;
}
return count;

@ -486,13 +471,10 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
/* get address string */
end = strchr(start, '/');
if (!end || (end - start >= 40)) {
PRINT_WARN("Invalid format for ipato_addx/delx. "
"Use <ip addr>/<mask bits>\n");
return -EINVAL;
}
strncpy(buffer, start, end - start);
if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
PRINT_WARN("Invalid IP address format!\n");
return -EINVAL;
}
start = end + 1;

@ -500,7 +482,6 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
if (!strlen(start) ||
(tmp == start) ||
(*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n");
return -EINVAL;
}
return 0;

@ -520,7 +501,6 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,

ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
if (!ipatoe) {
PRINT_WARN("No memory to allocate ipato entry\n");
return -ENOMEM;
}
ipatoe->proto = proto;

@ -609,8 +589,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
} else if (!strcmp(tmp, "0")) {
card->ipato.invert6 = 0;
} else {
PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
"this file\n");
return -EINVAL;
}
return count;

@ -724,7 +702,6 @@ static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
u8 *addr)
{
if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
PRINT_WARN("Invalid IP address format!\n");
return -EINVAL;
}
return 0;

@ -891,7 +868,6 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
u8 *addr)
{
if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
PRINT_WARN("Invalid IP address format!\n");
return -EINVAL;
}
return 0;

@ -396,8 +396,10 @@ static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *op
{
struct request_sock *req = reqsk_alloc(ops);

if (req != NULL)
if (req != NULL) {
inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
inet6_rsk(req)->pktopts = NULL;
}

return req;
}

@ -246,6 +246,7 @@ enum rt_class_t
{
RT_TABLE_UNSPEC=0,
/* User defined values */
RT_TABLE_COMPAT=252,
RT_TABLE_DEFAULT=253,
RT_TABLE_MAIN=254,
RT_TABLE_LOCAL=255,

@ -100,7 +100,7 @@ extern char * nvram_get(const char *name);
/* Get the device MAC address */
static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
#ifdef CONFIG_BCM947XX
#ifdef CONFIG_BCM47XX
char *res = nvram_get("et0macaddr");
if (res)
memcpy(macaddr, res, 6);

@ -38,7 +38,7 @@ struct virtio_net_hdr
#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
__u8 gso_type;
__u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
__u16 gso_size; /* Bytes to append to gso_hdr_len per frame */
__u16 gso_size; /* Bytes to append to hdr_len per frame */
__u16 csum_start; /* Position to start checksumming from */
__u16 csum_offset; /* Offset after that to place checksum */
};

@ -197,4 +197,14 @@ static inline int inet_iif(const struct sk_buff *skb)
return skb->rtable->rt_iif;
}

static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);

if (req != NULL)
inet_rsk(req)->opt = NULL;

return req;
}

#endif /* _INET_SOCK_H */

@ -433,7 +433,6 @@ extern struct sk_buff * tcp_make_synack(struct sock *sk,

extern int tcp_disconnect(struct sock *sk, int flags);

extern void tcp_unhash(struct sock *sk);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];

@ -589,7 +589,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;

req = reqsk_alloc(&dccp_request_sock_ops);
req = inet_reqsk_alloc(&dccp_request_sock_ops);
if (req == NULL)
goto drop;

@ -605,7 +605,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->opt = NULL;

/*
* Step 3: Process LISTEN state

@ -421,7 +421,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
ireq6 = inet6_rsk(req);
ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
ireq6->pktopts = NULL;

if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||

@ -960,7 +960,10 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
rtm->rtm_dst_len = dst_len;
rtm->rtm_src_len = 0;
rtm->rtm_tos = tos;
rtm->rtm_table = tb_id;
if (tb_id < 256)
rtm->rtm_table = tb_id;
else
rtm->rtm_table = RT_TABLE_COMPAT;
NLA_PUT_U32(skb, RTA_TABLE, tb_id);
rtm->rtm_type = type;
rtm->rtm_flags = fi->fib_flags;

@ -285,7 +285,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
cookie_check_timestamp(&tcp_opt);

ret = NULL;
req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
goto out;

@ -301,7 +301,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->rmt_port = th->source;
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->opt = NULL;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->rcv_wscale = tcp_opt.rcv_wscale;
ireq->sack_ok = tcp_opt.sack_ok;

@ -1285,7 +1285,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;

req = reqsk_alloc(&tcp_request_sock_ops);
req = inet_reqsk_alloc(&tcp_request_sock_ops);
if (!req)
goto drop;

@ -191,7 +191,7 @@ lookup_protocol:
np->mcast_hops = -1;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->ipv6only = init_net.ipv6.sysctl.bindv6only;
np->ipv6only = net->ipv6.sysctl.bindv6only;

/* Init the ipv4 part of the socket since we can have sockets
* using v6 API for ipv4.

@ -198,7 +198,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
ireq6 = inet6_rsk(req);
treq = tcp_rsk(req);
ireq6->pktopts = NULL;

if (security_inet_conn_request(sk, skb, req)) {
reqsk_free(req);

@ -1299,7 +1299,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
treq = inet6_rsk(req);
ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
treq->pktopts = NULL;
if (!want_cookie)
TCP_ECN_create_request(req, tcp_hdr(skb));

@ -3030,6 +3030,9 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)

static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
{
if (atomic_read(&pfkey_socks_nr) == 0)
return 0;

switch (c->event) {
case XFRM_MSG_EXPIRE:
return key_notify_sa_expire(x, c);

@ -899,7 +899,7 @@ extern const struct iw_handler_def ieee80211_iw_handler_def;


/* ieee80211_ioctl.c */
int ieee80211_set_freq(struct ieee80211_local *local, int freq);
int ieee80211_set_freq(struct net_device *dev, int freq);
/* ieee80211_sta.c */
void ieee80211_sta_timer(unsigned long data);
void ieee80211_sta_work(struct work_struct *work);

@ -511,6 +511,7 @@ static int ieee80211_stop(struct net_device *dev)
case IEEE80211_IF_TYPE_STA:
case IEEE80211_IF_TYPE_IBSS:
sdata->u.sta.state = IEEE80211_DISABLED;
memset(sdata->u.sta.bssid, 0, ETH_ALEN);
del_timer_sync(&sdata->u.sta.timer);
/*
* When we get here, the interface is marked down.

@ -44,7 +44,7 @@
#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
#define IEEE80211_SCAN_INTERVAL (2 * HZ)
#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ)
#define IEEE80211_IBSS_JOIN_TIMEOUT (20 * HZ)
#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)

#define IEEE80211_PROBE_DELAY (HZ / 33)
#define IEEE80211_CHANNEL_TIME (HZ / 33)

@ -2336,6 +2336,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
u8 *pos;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_supported_band *sband;
union iwreq_data wrqu;

sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

@ -2358,13 +2359,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
sdata->drop_unencrypted = bss->capability &
WLAN_CAPABILITY_PRIVACY ? 1 : 0;

res = ieee80211_set_freq(local, bss->freq);
res = ieee80211_set_freq(dev, bss->freq);

if (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS) {
printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
"%d MHz\n", dev->name, local->oper_channel->center_freq);
return -1;
}
if (res)
return res;

/* Set beacon template */
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);

@ -2479,6 +2477,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
ifsta->state = IEEE80211_IBSS_JOINED;
mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);

memset(&wrqu, 0, sizeof(wrqu));
memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);

return res;
}

@ -3486,7 +3488,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
spin_unlock_bh(&local->sta_bss_lock);

if (selected) {
ieee80211_set_freq(local, selected->freq);
ieee80211_set_freq(dev, selected->freq);
if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
ieee80211_sta_set_ssid(dev, selected->ssid,
selected->ssid_len);

@ -290,14 +290,22 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
return 0;
}

int ieee80211_set_freq(struct ieee80211_local *local, int freqMHz)
int ieee80211_set_freq(struct net_device *dev, int freqMHz)
{
int ret = -EINVAL;
struct ieee80211_channel *chan;
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);

if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
chan->flags & IEEE80211_CHAN_NO_IBSS) {
printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
"%d MHz\n", dev->name, chan->center_freq);
return ret;
}
local->oper_channel = chan;

if (local->sta_sw_scanning || local->sta_hw_scanning)

@ -315,7 +323,6 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *freq, char *extra)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

if (sdata->vif.type == IEEE80211_IF_TYPE_STA)

@ -329,14 +336,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
IEEE80211_STA_AUTO_CHANNEL_SEL;
return 0;
} else
return ieee80211_set_freq(local,
return ieee80211_set_freq(dev,
ieee80211_channel_to_frequency(freq->m));
} else {
int i, div = 1000000;
for (i = 0; i < freq->e; i++)
div /= 10;
if (div > 0)
return ieee80211_set_freq(local, freq->m / div);
return ieee80211_set_freq(dev, freq->m / div);
else
return -EINVAL;
}