commit 22f6dacdfc

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	include/net/tcp.h

32 changed files with 623 additions and 184 deletions
bnx2 driver:

@@ -54,8 +54,8 @@
 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.0.0"
-#define DRV_MODULE_RELDATE "April 2, 2009"
+#define DRV_MODULE_VERSION "2.0.1"
+#define DRV_MODULE_RELDATE "May 6, 2009"
 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw"
 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw"
 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw"

@@ -2600,6 +2600,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
    /* Tell compiler that status block fields can change. */
    barrier();
    cons = *bnapi->hw_tx_cons_ptr;
    barrier();
    if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
        cons++;
    return cons;

@@ -2879,6 +2880,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
    /* Tell compiler that status block fields can change. */
    barrier();
    cons = *bnapi->hw_rx_cons_ptr;
    barrier();
    if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
        cons++;
    return cons;

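The interesting part of the bnx2 hunks above is the read pattern: the consumer index lives in a status block that the NIC updates by DMA, so the compiler must not reuse a cached value, and the last slot of each ring page is a link entry rather than a real descriptor, so an index landing on it is stepped over. A minimal sketch of that pattern with illustrative names (`ring_hw_cons` and `MAX_DESC_CNT` are not the driver's own):

    #include <stdint.h>

    #define MAX_DESC_CNT 255 /* illustrative: last slot of a 256-entry page is reserved */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    /* Read a hardware-maintained consumer index from a shared status block. */
    static uint16_t ring_hw_cons(volatile uint16_t *hw_cons_ptr)
    {
        uint16_t cons;

        barrier();              /* don't let the compiler reuse a stale read */
        cons = *hw_cons_ptr;    /* index written by the NIC via DMA */
        barrier();

        /* The last entry of each ring page is a link/next-page entry,
         * never a real descriptor, so step over it. */
        if ((cons & MAX_DESC_CNT) == MAX_DESC_CNT)
            cons++;
        return cons;
    }
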
bonding (bond_alb):

@@ -1706,10 +1706,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 * Called with RTNL
 */
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
    __releases(&bond->curr_slave_lock)
    __releases(&bond->lock)
    __acquires(&bond->lock)
    __acquires(&bond->curr_slave_lock)
    __releases(&bond->lock)
{
    struct bonding *bond = netdev_priv(bond_dev);
    struct sockaddr *sa = addr;

@@ -1745,9 +1743,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
        }
    }

    write_unlock_bh(&bond->curr_slave_lock);
    read_unlock(&bond->lock);

    if (swap_slave) {
        alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
        alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);

@@ -1755,16 +1750,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
        alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
                               bond->alb_info.rlb_enabled);

        read_lock(&bond->lock);
        alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
        if (bond->alb_info.rlb_enabled) {
            /* inform clients mac address has changed */
            rlb_req_update_slave_clients(bond, bond->curr_active_slave);
        }
        read_unlock(&bond->lock);
    }

    read_lock(&bond->lock);
    write_lock_bh(&bond->curr_slave_lock);

    return 0;
}

e1000:

@@ -3741,7 +3741,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
    struct e1000_hw *hw = &adapter->hw;
    u32 rctl, icr = er32(ICR);

-   if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
+   if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
        return IRQ_NONE;  /* Not our interrupt */

    /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is

ehea (driver header):

@@ -40,7 +40,7 @@
 #include <asm/io.h>

 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0100"
+#define DRV_VERSION "EHEA_0101"

 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1

ehea (main driver):

@@ -545,14 +545,17 @@ static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
    x &= (arr_len - 1);

    pref = skb_array[x];
-   prefetchw(pref);
-   prefetchw(pref + EHEA_CACHE_LINE);
-
-   pref = (skb_array[x]->data);
-   prefetch(pref);
-   prefetch(pref + EHEA_CACHE_LINE);
-   prefetch(pref + EHEA_CACHE_LINE * 2);
-   prefetch(pref + EHEA_CACHE_LINE * 3);
+   if (pref) {
+       prefetchw(pref);
+       prefetchw(pref + EHEA_CACHE_LINE);
+
+       pref = (skb_array[x]->data);
+       prefetch(pref);
+       prefetch(pref + EHEA_CACHE_LINE);
+       prefetch(pref + EHEA_CACHE_LINE * 2);
+       prefetch(pref + EHEA_CACHE_LINE * 3);
+   }
+
    skb = skb_array[skb_index];
    skb_array[skb_index] = NULL;
    return skb;

@@ -569,12 +572,14 @@ static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
    x &= (arr_len - 1);

    pref = skb_array[x];
-   prefetchw(pref);
-   prefetchw(pref + EHEA_CACHE_LINE);
-
-   pref = (skb_array[x]->data);
-   prefetchw(pref);
-   prefetchw(pref + EHEA_CACHE_LINE);
+   if (pref) {
+       prefetchw(pref);
+       prefetchw(pref + EHEA_CACHE_LINE);
+
+       pref = (skb_array[x]->data);
+       prefetchw(pref);
+       prefetchw(pref + EHEA_CACHE_LINE);
+   }

    skb = skb_array[wqe_index];
    skb_array[wqe_index] = NULL;

igb:

@@ -2010,7 +2010,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
    struct e1000_hw *hw = &adapter->hw;
    u32 rctl;
    u32 srrctl = 0;
-   int i, j;
+   int i;

    rctl = rd32(E1000_RCTL);

@@ -2075,8 +2075,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
    if (adapter->vfs_allocated_count) {
        u32 vmolr;

-       j = adapter->rx_ring[0].reg_idx;
-
        /* set all queue drop enable bits */
        wr32(E1000_QDE, ALL_QUEUES);
        srrctl |= E1000_SRRCTL_DROP_EN;

@@ -2084,16 +2082,16 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
        /* disable queue 0 to prevent tail write w/o re-config */
        wr32(E1000_RXDCTL(0), 0);

-       vmolr = rd32(E1000_VMOLR(j));
+       vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
        if (rctl & E1000_RCTL_LPE)
            vmolr |= E1000_VMOLR_LPE;
-       if (adapter->num_rx_queues > 0)
+       if (adapter->num_rx_queues > 1)
            vmolr |= E1000_VMOLR_RSSE;
-       wr32(E1000_VMOLR(j), vmolr);
+       wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
    }

    for (i = 0; i < adapter->num_rx_queues; i++) {
-       j = adapter->rx_ring[i].reg_idx;
+       int j = adapter->rx_ring[i].reg_idx;
        wr32(E1000_SRRCTL(j), srrctl);
    }

USB network driver Kconfig:

@@ -180,6 +180,20 @@ config USB_NET_CDCETHER
      IEEE 802 "local assignment" bit is set in the address, a "usbX"
      name is used instead.

+config USB_NET_CDC_EEM
+   tristate "CDC EEM support"
+   depends on USB_USBNET && EXPERIMENTAL
+   help
+     This option supports devices conforming to the Communication Device
+     Class (CDC) Ethernet Emulation Model, a specification that's easy to
+     implement in device firmware. The CDC EEM specifications are available
+     from <http://www.usb.org/>.
+
+     This driver creates an interface named "ethX", where X depends on
+     what other networking devices you have in use. However, if the
+     IEEE 802 "local assignment" bit is set in the address, a "usbX"
+     name is used instead.
+
 config USB_NET_DM9601
    tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
    depends on USB_USBNET

USB network driver Makefile:

@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
 obj-$(CONFIG_USB_HSO) += hso.o
 obj-$(CONFIG_USB_NET_AX8817X) += asix.o
 obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
+obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
 obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
 obj-$(CONFIG_USB_NET_GL620A) += gl620a.o

drivers/net/usb/cdc_eem.c (new file, 381 lines):

@@ -0,0 +1,381 @@
/*
 * USB CDC EEM network interface driver
 * Copyright (C) 2009 Oberthur Technologies
 * by Omar Laazimani, Olivier Condemine
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>


/*
 * This driver is an implementation of the CDC "Ethernet Emulation
 * Model" (EEM) specification, which encapsulates Ethernet frames
 * for transport over USB using a simpler USB device model than the
 * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
 *
 * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
 *
 * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
 * 2.6.27 and 2.6.30rc2 kernel.
 * It has also been validated on Openmoko Om 2008.12 (based on 2.6.24 kernel).
 * build on 23-April-2009
 */

#define EEM_HEAD    2   /* 2 byte header */

/*-------------------------------------------------------------------------*/

static void eem_linkcmd_complete(struct urb *urb)
{
    dev_kfree_skb(urb->context);
    usb_free_urb(urb);
}

static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
{
    struct urb *urb;
    int status;

    urb = usb_alloc_urb(0, GFP_ATOMIC);
    if (!urb)
        goto fail;

    usb_fill_bulk_urb(urb, dev->udev, dev->out,
            skb->data, skb->len, eem_linkcmd_complete, skb);

    status = usb_submit_urb(urb, GFP_ATOMIC);
    if (status) {
        usb_free_urb(urb);
fail:
        dev_kfree_skb(skb);
        devwarn(dev, "link cmd failure\n");
        return;
    }
}

static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
{
    int status = 0;

    status = usbnet_get_endpoints(dev, intf);
    if (status < 0) {
        usb_set_intfdata(intf, NULL);
        usb_driver_release_interface(driver_of(intf), intf);
        return status;
    }

    /* no jumbogram (16K) support for now */

    dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;

    return 0;
}

/*
 * EEM permits packing multiple Ethernet frames into USB transfers
 * (a "bundle"), but for TX we don't try to do that.
 */
static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                    gfp_t flags)
{
    struct sk_buff *skb2 = NULL;
    u16 len = skb->len;
    u32 crc = 0;
    int padlen = 0;

    /* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is
     * zero, stick two bytes of zero length EEM packet on the end.
     * Else the framework would add invalid single byte padding,
     * since it can't know whether ZLPs will be handled right by
     * all the relevant hardware and software.
     */
    if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket))
        padlen += 2;

    if (!skb_cloned(skb)) {
        int headroom = skb_headroom(skb);
        int tailroom = skb_tailroom(skb);

        if ((tailroom >= ETH_FCS_LEN + padlen)
                && (headroom >= EEM_HEAD))
            goto done;

        if ((headroom + tailroom)
                > (EEM_HEAD + ETH_FCS_LEN + padlen)) {
            skb->data = memmove(skb->head +
                    EEM_HEAD,
                    skb->data,
                    skb->len);
            skb_set_tail_pointer(skb, len);
            goto done;
        }
    }

    skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
    if (!skb2)
        return NULL;

    dev_kfree_skb_any(skb);
    skb = skb2;

done:
    /* we don't use the "no Ethernet CRC" option */
    crc = crc32_le(~0, skb->data, skb->len);
    crc = ~crc;

    put_unaligned_le32(crc, skb_put(skb, 4));

    /* EEM packet header format:
     * b0..13: length of ethernet frame
     * b14:    bmCRC (1 == valid Ethernet CRC)
     * b15:    bmType (0 == data)
     */
    len = skb->len;
    put_unaligned_le16(BIT(14) | len, skb_push(skb, 2));

    /* Bundle a zero length EEM packet if needed */
    if (padlen)
        put_unaligned_le16(0, skb_put(skb, 2));

    return skb;
}

static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
    /*
     * Our task here is to strip off framing, leaving skb with one
     * data frame for the usbnet framework code to process. But we
     * may have received multiple EEM payloads, or command payloads.
     * So we must process _everything_ as if it's a header, except
     * maybe the last data payload
     *
     * REVISIT the framework needs updating so that when we consume
     * all payloads (the last or only message was a command, or a
     * zero length EEM packet) that is not accounted as an rx_error.
     */
    do {
        struct sk_buff *skb2 = NULL;
        u16 header;
        u16 len = 0;

        /* incomplete EEM header? */
        if (skb->len < EEM_HEAD)
            return 0;

        /*
         * EEM packet header format:
         * b0..14: EEM type dependant (Data or Command)
         * b15:    bmType
         */
        header = get_unaligned_le16(skb->data);
        skb_pull(skb, EEM_HEAD);

        /*
         * The bmType bit helps to denote when EEM
         * packet is data or command :
         *   bmType = 0 : EEM data payload
         *   bmType = 1 : EEM (link) command
         */
        if (header & BIT(15)) {
            u16 bmEEMCmd;

            /*
             * EEM (link) command packet:
             * b0..10:  bmEEMCmdParam
             * b11..13: bmEEMCmd
             * b14:     bmReserved (must be 0)
             * b15:     1 (EEM command)
             */
            if (header & BIT(14)) {
                devdbg(dev, "reserved command %04x\n", header);
                continue;
            }

            bmEEMCmd = (header >> 11) & 0x7;
            switch (bmEEMCmd) {

            /* Responding to echo requests is mandatory. */
            case 0:     /* Echo command */
                len = header & 0x7FF;

                /* bogus command? */
                if (skb->len < len)
                    return 0;

                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!skb2))
                    goto next;
                skb_trim(skb2, len);
                put_unaligned_le16(BIT(15) | (1 << 11) | len,
                        skb_push(skb2, 2));
                eem_linkcmd(dev, skb2);
                break;

            /*
             * Host may choose to ignore hints.
             *  - suspend: peripheral ready to suspend
             *  - response: suggest N millisec polling
             *  - response complete: suggest N sec polling
             */
            case 2:     /* Suspend hint */
            case 3:     /* Response hint */
            case 4:     /* Response complete hint */
                continue;

            /*
             * Hosts should never receive host-to-peripheral
             * or reserved command codes; or responses to an
             * echo command we didn't send.
             */
            case 1:     /* Echo response */
            case 5:     /* Tickle */
            default:    /* reserved */
                devwarn(dev, "unexpected link command %d\n",
                        bmEEMCmd);
                continue;
            }

        } else {
            u32 crc, crc2;
            int is_last;

            /* zero length EEM packet? */
            if (header == 0)
                continue;

            /*
             * EEM data packet header :
             * b0..13: length of ethernet frame
             * b14:    bmCRC
             * b15:    0 (EEM data)
             */
            len = header & 0x3FFF;

            /* bogus EEM payload? */
            if (skb->len < len)
                return 0;

            /* bogus ethernet frame? */
            if (len < (ETH_HLEN + ETH_FCS_LEN))
                goto next;

            /*
             * Treat the last payload differently: framework
             * code expects our "fixup" to have stripped off
             * headers, so "skb" is a data packet (or error).
             * Else if it's not the last payload, keep "skb"
             * for further processing.
             */
            is_last = (len == skb->len);
            if (is_last)
                skb2 = skb;
            else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!skb2))
                    return 0;
            }

            crc = get_unaligned_le32(skb2->data
                    + len - ETH_FCS_LEN);
            skb_trim(skb2, len - ETH_FCS_LEN);

            /*
             * The bmCRC helps to denote when the CRC field in
             * the Ethernet frame contains a calculated CRC:
             *   bmCRC = 1 : CRC is calculated
             *   bmCRC = 0 : CRC = 0xDEADBEEF
             */
            if (header & BIT(14))
                crc2 = ~crc32_le(~0, skb2->data, len);
            else
                crc2 = 0xdeadbeef;

            if (is_last)
                return crc == crc2;

            if (unlikely(crc != crc2)) {
                dev->stats.rx_errors++;
                dev_kfree_skb_any(skb2);
            } else
                usbnet_skb_return(dev, skb2);
        }

next:
        skb_pull(skb, len);
    } while (skb->len);

    return 1;
}

static const struct driver_info eem_info = {
    .description =  "CDC EEM Device",
    .flags =        FLAG_ETHER,
    .bind =         eem_bind,
    .rx_fixup =     eem_rx_fixup,
    .tx_fixup =     eem_tx_fixup,
};

/*-------------------------------------------------------------------------*/

static const struct usb_device_id products[] = {
{
    USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM,
            USB_CDC_PROTO_EEM),
    .driver_info = (unsigned long) &eem_info,
},
{
    /* EMPTY == end of list */
},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver eem_driver = {
    .name =         "cdc_eem",
    .id_table =     products,
    .probe =        usbnet_probe,
    .disconnect =   usbnet_disconnect,
    .suspend =      usbnet_suspend,
    .resume =       usbnet_resume,
};


static int __init eem_init(void)
{
    return usb_register(&eem_driver);
}
module_init(eem_init);

static void __exit eem_exit(void)
{
    usb_deregister(&eem_driver);
}
module_exit(eem_exit);

MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>");
MODULE_DESCRIPTION("USB CDC EEM");
MODULE_LICENSE("GPL");

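Both fixup routines above pivot on the 16-bit little-endian EEM header word: bit 15 distinguishes data from link commands, and for data packets bit 14 flags a real Ethernet CRC while bits 0..13 carry the frame length. A small standalone sketch of packing and checking that word (the helper name and the values are illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    #define EEM_TYPE_BIT  (1u << 15)  /* 0 = data payload, 1 = link command */
    #define EEM_CRC_BIT   (1u << 14)  /* data: 1 = frame carries a real CRC-32 */
    #define EEM_LEN_MASK  0x3FFFu     /* data: Ethernet frame length incl. FCS */

    /* Build the header word for a data payload of 'len' bytes with a valid CRC. */
    static uint16_t eem_data_header(uint16_t len)
    {
        return (uint16_t)(EEM_CRC_BIT | (len & EEM_LEN_MASK));
    }

    int main(void)
    {
        uint16_t hdr = eem_data_header(1514 + 4);   /* max frame + FCS */

        if (!(hdr & EEM_TYPE_BIT))                  /* data, not a command */
            printf("len=%u crc=%u\n",
                   hdr & EEM_LEN_MASK, !!(hdr & EEM_CRC_BIT));
        return 0;
    }
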
iwlwifi station management:

@@ -719,6 +719,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
{
    unsigned long flags;
    int ret = 0;
    __le16 key_flags = 0;

    key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
    key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
    key_flags &= ~STA_KEY_FLG_INVALID;

    if (sta_id == priv->hw_params.bcast_sta_id)
        key_flags |= STA_KEY_MULTICAST_MSK;

    keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
    keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

@@ -738,6 +746,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
    WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
        "no space for a new key");

    priv->stations[sta_id].sta.key.key_flags = key_flags;

    /* This copy is acutally not needed: we get the key with each TX */
    memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);

@@ -754,9 +765,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
{
    u8 sta_id = IWL_INVALID_STATION;
    unsigned long flags;
    __le16 key_flags = 0;
    int i;
    DECLARE_MAC_BUF(mac);

    sta_id = priv->cfg->ops->smgmt->find_station(priv, addr);
    if (sta_id == IWL_INVALID_STATION) {

@@ -771,16 +780,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
        return;
    }

    key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
    key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
    key_flags &= ~STA_KEY_FLG_INVALID;

    if (sta_id == priv->hw_params.bcast_sta_id)
        key_flags |= STA_KEY_MULTICAST_MSK;

    spin_lock_irqsave(&priv->sta_lock, flags);

    priv->stations[sta_id].sta.key.key_flags = key_flags;
    priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

    for (i = 0; i < 5; i++)

iwl3945:

@@ -1462,7 +1462,6 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
    rxq->bd = NULL;
    rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl3945_rx_queue_free);


/* Convert linear signal-to-noise ratio into dB */

netfilter xt_LED header:

@@ -1,6 +1,8 @@
 #ifndef _XT_LED_H
 #define _XT_LED_H

+#include <linux/types.h>
+
 struct xt_led_info {
    char id[27];        /* Unique ID for this trigger in the LED class */
    __u8 always_blink;  /* Blink even if the LED is already on */

netfilter xt_cluster header:

@@ -12,4 +12,6 @@ struct xt_cluster_match_info {
    u_int32_t flags;
 };

+#define XT_CLUSTER_NODES_MAX 32
+
 #endif /* _XT_CLUSTER_MATCH_H */

USB CDC class definitions (cdc.h):

@@ -17,6 +17,7 @@
 #define USB_CDC_SUBCLASS_DMM  0x09
 #define USB_CDC_SUBCLASS_MDLM 0x0a
 #define USB_CDC_SUBCLASS_OBEX 0x0b
+#define USB_CDC_SUBCLASS_EEM  0x0c

 #define USB_CDC_PROTO_NONE    0

@@ -28,6 +29,8 @@
 #define USB_CDC_ACM_PROTO_AT_CDMA 6
 #define USB_CDC_ACM_PROTO_VENDOR  0xff

+#define USB_CDC_PROTO_EEM     7
+
 /*-------------------------------------------------------------------------*/

 /*

Bluetooth HCI header:

@@ -457,6 +457,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

 int hci_register_sysfs(struct hci_dev *hdev);
 void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_conn_init_sysfs(struct hci_conn *conn);
 void hci_conn_add_sysfs(struct hci_conn *conn);
 void hci_conn_del_sysfs(struct hci_conn *conn);

include/net/tcp.h (the conflicted file):

@@ -41,6 +41,7 @@
 #include <net/ip.h>
 #include <net/tcp_states.h>
 #include <net/inet_ecn.h>
+#include <net/dst.h>

 #include <linux/seq_file.h>

@@ -543,6 +544,17 @@ static inline void tcp_fast_path_check(struct sock *sk)
        tcp_fast_path_on(tp);
}

+/* Compute the actual rto_min value */
+static inline u32 tcp_rto_min(struct sock *sk)
+{
+   struct dst_entry *dst = __sk_dst_get(sk);
+   u32 rto_min = TCP_RTO_MIN;
+
+   if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+       rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+   return rto_min;
+}
+
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer push more data
 * than the offered window.

@@ -912,7 +924,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
                POLLIN | POLLRDNORM | POLLRDBAND);
        if (!inet_csk_ack_scheduled(sk))
            inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-                         (3 * TCP_RTO_MIN) / 4,
+                         (3 * tcp_rto_min(sk)) / 4,
                          TCP_RTO_MAX);
    }
    return 1;

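Moving tcp_rto_min() into the header lets the prequeue path honour a per-route RTO floor (the RTAX_RTO_MIN metric) instead of the compile-time TCP_RTO_MIN when it arms the delayed-ACK timer at 3/4 of that floor. A rough standalone sketch of the arithmetic, assuming HZ=1000 so jiffies equal milliseconds (constants are illustrative):

    #include <stdio.h>

    #define HZ          1000
    #define TCP_RTO_MIN (HZ / 5)    /* 200 ms compile-time floor */

    /* rto_min_override: 0 means "no per-route rto_min metric configured" */
    static unsigned int delack_timeout(unsigned int rto_min_override)
    {
        unsigned int rto_min = rto_min_override ? rto_min_override : TCP_RTO_MIN;

        /* same shape as tcp_prequeue(): delayed-ACK timer at 3/4 of rto_min */
        return (3 * rto_min) / 4;
    }

    int main(void)
    {
        printf("default: %u ms\n", delack_timeout(0));          /* 150 ms */
        printf("route rto_min=40: %u ms\n", delack_timeout(40)); /* 30 ms */
        return 0;
    }
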
netfilter Kconfig:

@@ -119,12 +119,6 @@ menuconfig NETFILTER
      <file:Documentation/Changes> under "iptables" for the location of
      these packages.

-     Make sure to say N to "Fast switching" below if you intend to say Y
-     here, as Fast switching currently bypasses netfilter.
-
      Chances are that you should say Y here if you compile a kernel which
      will run as a router and N for regular hosts. If unsure, say N.

 if NETFILTER

 config NETFILTER_DEBUG

Bluetooth hci_conn:

@@ -248,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
    if (hdev->notify)
        hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

+   hci_conn_init_sysfs(conn);
+
    tasklet_enable(&hdev->tx_task);

    return conn;

Bluetooth hci_sysfs:

@@ -9,7 +9,7 @@
 struct class *bt_class = NULL;
 EXPORT_SYMBOL_GPL(bt_class);

-static struct workqueue_struct *bluetooth;
+static struct workqueue_struct *bt_workq;

 static inline char *link_typetostr(int type)
 {

@@ -88,9 +88,12 @@ static struct device_type bt_link = {
static void add_conn(struct work_struct *work)
{
    struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
    struct hci_dev *hdev = conn->hdev;

-   /* ensure previous add/del is complete */
-   flush_workqueue(bluetooth);
+   /* ensure previous del is complete */
+   flush_work(&conn->work_del);

    dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

    if (device_add(&conn->dev) < 0) {
        BT_ERR("Failed to register connection device");

@@ -98,27 +101,6 @@ static void add_conn(struct work_struct *work)
    }
}

-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
-   struct hci_dev *hdev = conn->hdev;
-
-   BT_DBG("conn %p", conn);
-
-   conn->dev.type = &bt_link;
-   conn->dev.class = bt_class;
-   conn->dev.parent = &hdev->dev;
-
-   dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
-   dev_set_drvdata(&conn->dev, conn);
-
-   device_initialize(&conn->dev);
-
-   INIT_WORK(&conn->work_add, add_conn);
-
-   queue_work(bluetooth, &conn->work_add);
-}
-
/*
 * The rfcomm tty device will possibly retain even when conn
 * is down, and sysfs doesn't support move zombie device,

@@ -134,8 +116,11 @@ static void del_conn(struct work_struct *work)
    struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
    struct hci_dev *hdev = conn->hdev;

-   /* ensure previous add/del is complete */
-   flush_workqueue(bluetooth);
+   /* ensure previous add is complete */
+   flush_work(&conn->work_add);

    if (!device_is_registered(&conn->dev))
        return;

    while (1) {
        struct device *dev;

@@ -152,16 +137,36 @@ static void del_conn(struct work_struct *work)
    hci_dev_put(hdev);
}

+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
+   struct hci_dev *hdev = conn->hdev;
+
+   BT_DBG("conn %p", conn);
+
+   conn->dev.type = &bt_link;
+   conn->dev.class = bt_class;
+   conn->dev.parent = &hdev->dev;
+
+   dev_set_drvdata(&conn->dev, conn);
+
+   device_initialize(&conn->dev);
+
+   INIT_WORK(&conn->work_add, add_conn);
+   INIT_WORK(&conn->work_del, del_conn);
+}
+
void hci_conn_add_sysfs(struct hci_conn *conn)
{
    BT_DBG("conn %p", conn);

    queue_work(bt_workq, &conn->work_add);
}

void hci_conn_del_sysfs(struct hci_conn *conn)
{
    BT_DBG("conn %p", conn);

    if (!device_is_registered(&conn->dev))
        return;

-   INIT_WORK(&conn->work_del, del_conn);
-
-   queue_work(bluetooth, &conn->work_del);
+   queue_work(bt_workq, &conn->work_del);
}

static inline char *host_typetostr(int type)

@@ -438,13 +443,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev)

int __init bt_sysfs_init(void)
{
-   bluetooth = create_singlethread_workqueue("bluetooth");
-   if (!bluetooth)
+   bt_workq = create_singlethread_workqueue("bluetooth");
+   if (!bt_workq)
        return -ENOMEM;

    bt_class = class_create(THIS_MODULE, "bluetooth");
    if (IS_ERR(bt_class)) {
-       destroy_workqueue(bluetooth);
+       destroy_workqueue(bt_workq);
        return PTR_ERR(bt_class);
    }

@@ -453,7 +458,7 @@ int __init bt_sysfs_init(void)

void bt_sysfs_cleanup(void)
{
-   destroy_workqueue(bluetooth);
+   destroy_workqueue(bt_workq);

    class_destroy(bt_class);
}

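The hci_sysfs rework above boils down to one workqueue idiom: each connection owns two pre-initialised work items, and each handler flushes only its counterpart for the same connection rather than flushing the whole shared workqueue. A hedged sketch of that pattern with illustrative names (not the Bluetooth code itself):

    #include <linux/workqueue.h>

    struct conn_obj {
        struct work_struct work_add;
        struct work_struct work_del;
    };

    static void obj_add_work(struct work_struct *work)
    {
        struct conn_obj *c = container_of(work, struct conn_obj, work_add);

        flush_work(&c->work_del);   /* wait only for *this* object's del */
        /* ... register the object ... */
    }

    static void obj_del_work(struct work_struct *work)
    {
        struct conn_obj *c = container_of(work, struct conn_obj, work_del);

        flush_work(&c->work_add);   /* wait only for *this* object's add */
        /* ... unregister the object ... */
    }

    static void obj_init(struct conn_obj *c)
    {
        /* both items are set up once, so flush_work() on either is always valid */
        INIT_WORK(&c->work_add, obj_add_work);
        INIT_WORK(&c->work_del, obj_del_work);
    }
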
net core skbuff (skb_recycle_check):

@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
    shinfo->gso_segs = 0;
    shinfo->gso_type = 0;
    shinfo->ip6_frag_id = 0;
    shinfo->tx_flags.flags = 0;
    shinfo->frag_list = NULL;
    memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

    memset(skb, 0, offsetof(struct sk_buff, tail));
    skb->data = skb->head + NET_SKB_PAD;

TCP input:

@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
        tcp_grow_window(sk, skb);
}

-static u32 tcp_rto_min(struct sock *sk)
-{
-   struct dst_entry *dst = __sk_dst_get(sk);
-   u32 rto_min = TCP_RTO_MIN;
-
-   if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-       rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
-   return rto_min;
-}
-
/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge

netfilter ip6t_ipv6header match:

@@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
        struct ipv6_opt_hdr _hdr;
        int hdrlen;

-       /* Is there enough space for the next ext header? */
-       if (len < (int)sizeof(struct ipv6_opt_hdr))
-           return false;
        /* No more exthdr -> evaluate */
        if (nexthdr == NEXTHDR_NONE) {
            temp |= MASK_NONE;
            break;
        }
+       /* Is there enough space for the next ext header? */
+       if (len < (int)sizeof(struct ipv6_opt_hdr))
+           return false;
        /* ESP -> evaluate */
        if (nexthdr == NEXTHDR_ESP) {
            temp |= MASK_ESP;

mac80211 minstrel rate control:

@@ -476,7 +476,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
        return NULL;

    for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
-       sband = hw->wiphy->bands[hw->conf.channel->band];
+       sband = hw->wiphy->bands[i];
        if (sband->n_bitrates > max_rates)
            max_rates = sband->n_bitrates;
    }

mac80211 PID rate control:

@@ -317,57 +317,17 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                           struct ieee80211_sta *sta, void *priv_sta)
{
    struct rc_pid_sta_info *spinfo = priv_sta;
    struct rc_pid_info *pinfo = priv;
    struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
    struct sta_info *si;
    int i, j, tmp;
    bool s;

    /* TODO: This routine should consider using RSSI from previous packets
     * as we need to have IEEE 802.1X auth succeed immediately after assoc..
     * Until that method is implemented, we will use the lowest supported
     * rate as a workaround. */

    spinfo->txrate_idx = rate_lowest_index(sband, sta);
    /* HACK */
    si = container_of(sta, struct sta_info, sta);
    si->fail_avg = 0;
}

static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
                                    struct dentry *debugfsdir)
{
    struct rc_pid_info *pinfo;
    struct rc_pid_rateinfo *rinfo;
    struct ieee80211_supported_band *sband;
    int i, j, tmp;
    bool s;
#ifdef CONFIG_MAC80211_DEBUGFS
    struct rc_pid_debugfs_entries *de;
#endif

    sband = hw->wiphy->bands[hw->conf.channel->band];

    pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
    if (!pinfo)
        return NULL;

    /* We can safely assume that sband won't change unless we get
     * reinitialized. */
    rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
    if (!rinfo) {
        kfree(pinfo);
        return NULL;
    }

    pinfo->target = RC_PID_TARGET_PF;
    pinfo->sampling_period = RC_PID_INTERVAL;
    pinfo->coeff_p = RC_PID_COEFF_P;
    pinfo->coeff_i = RC_PID_COEFF_I;
    pinfo->coeff_d = RC_PID_COEFF_D;
    pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT;
    pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR;
    pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION;
    pinfo->norm_offset = RC_PID_NORM_OFFSET;
    pinfo->rinfo = rinfo;
    pinfo->oldrate = 0;

    /* Sort the rates. This is optimized for the most common case (i.e.
     * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
     * mapping too. */

@@ -395,6 +355,51 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
            break;
    }

    spinfo->txrate_idx = rate_lowest_index(sband, sta);
    /* HACK */
    si = container_of(sta, struct sta_info, sta);
    si->fail_avg = 0;
}

static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
                                    struct dentry *debugfsdir)
{
    struct rc_pid_info *pinfo;
    struct rc_pid_rateinfo *rinfo;
    struct ieee80211_supported_band *sband;
    int i, max_rates = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
    struct rc_pid_debugfs_entries *de;
#endif

    pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
    if (!pinfo)
        return NULL;

    for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
        sband = hw->wiphy->bands[i];
        if (sband->n_bitrates > max_rates)
            max_rates = sband->n_bitrates;
    }

    rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
    if (!rinfo) {
        kfree(pinfo);
        return NULL;
    }

    pinfo->target = RC_PID_TARGET_PF;
    pinfo->sampling_period = RC_PID_INTERVAL;
    pinfo->coeff_p = RC_PID_COEFF_P;
    pinfo->coeff_i = RC_PID_COEFF_I;
    pinfo->coeff_d = RC_PID_COEFF_D;
    pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT;
    pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR;
    pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION;
    pinfo->norm_offset = RC_PID_NORM_OFFSET;
    pinfo->rinfo = rinfo;
    pinfo->oldrate = 0;

#ifdef CONFIG_MAC80211_DEBUGFS
    de = &pinfo->dentries;
    de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,

mac80211 TX path:

@@ -787,7 +787,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
    hdrlen = ieee80211_hdrlen(hdr->frame_control);

    /* internal error, why is TX_FRAGMENTED set? */
-   if (WARN_ON(skb->len <= frag_threshold))
+   if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
        return TX_DROP;

    /*

netfilter ctnetlink:

@@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
    return 0;
}

-static inline void
-ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report)
-{
-   unsigned int events = 0;
-
-   if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-       events |= IPCT_RELATED;
-   else
-       events |= IPCT_NEW;
-
-   nf_conntrack_event_report(IPCT_STATUS |
-                 IPCT_HELPER |
-                 IPCT_REFRESH |
-                 IPCT_PROTOINFO |
-                 IPCT_NATSEQADJ |
-                 IPCT_MARK |
-                 events,
-                 ct,
-                 pid,
-                 report);
-}
-
static struct nf_conn *
ctnetlink_create_conntrack(struct nlattr *cda[],
               struct nf_conntrack_tuple *otuple,

@@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
        err = -ENOENT;
        if (nlh->nlmsg_flags & NLM_F_CREATE) {
            struct nf_conn *ct;
+           enum ip_conntrack_events events;

            ct = ctnetlink_create_conntrack(cda, &otuple,
                            &rtuple, u3);

@@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
            err = 0;
            nf_conntrack_get(&ct->ct_general);
            spin_unlock_bh(&nf_conntrack_lock);
-           ctnetlink_event_report(ct,
-                          NETLINK_CB(skb).pid,
-                          nlmsg_report(nlh));
+           if (test_bit(IPS_EXPECTED_BIT, &ct->status))
+               events = IPCT_RELATED;
+           else
+               events = IPCT_NEW;
+
+           nf_conntrack_event_report(IPCT_STATUS |
+                         IPCT_HELPER |
+                         IPCT_PROTOINFO |
+                         IPCT_NATSEQADJ |
+                         IPCT_MARK | events,
+                         ct, NETLINK_CB(skb).pid,
+                         nlmsg_report(nlh));
            nf_ct_put(ct);
        } else
            spin_unlock_bh(&nf_conntrack_lock);

@@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
    if (err == 0) {
        nf_conntrack_get(&ct->ct_general);
        spin_unlock_bh(&nf_conntrack_lock);
-       ctnetlink_event_report(ct,
-                      NETLINK_CB(skb).pid,
-                      nlmsg_report(nlh));
+       nf_conntrack_event_report(IPCT_STATUS |
+                     IPCT_HELPER |
+                     IPCT_PROTOINFO |
+                     IPCT_NATSEQADJ |
+                     IPCT_MARK,
+                     ct, NETLINK_CB(skb).pid,
+                     nlmsg_report(nlh));
        nf_ct_put(ct);
    } else
        spin_unlock_bh(&nf_conntrack_lock);

netfilter xt_cluster match:

@@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
    struct xt_cluster_match_info *info = par->matchinfo;

-   if (info->node_mask >= (1 << info->total_nodes)) {
+   if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
+       printk(KERN_ERR "xt_cluster: you have exceeded the maximum "
+              "number of cluster nodes (%u > %u)\n",
+              info->total_nodes, XT_CLUSTER_NODES_MAX);
+       return false;
+   }
+   if (info->node_mask >= (1ULL << info->total_nodes)) {
        printk(KERN_ERR "xt_cluster: this node mask cannot be "
               "higher than the total number of nodes\n");
        return false;

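The reordered checks above matter because `1 << total_nodes` is undefined for a 32-bit int once total_nodes reaches 32; bounding total_nodes first and widening the shift to 1ULL keeps the node-mask comparison well defined. A small userspace illustration (values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t total_nodes = 32;
        uint32_t node_mask = 0xffffffffu;

        /* Broken form: shifting a 32-bit 1 by 32 is undefined behaviour,
         * so a bound check written as (node_mask >= (1 << total_nodes))
         * cannot be trusted. */

        /* Fixed form: bound total_nodes first, then shift in 64 bits. */
        if (total_nodes <= 32 &&
            (uint64_t)node_mask < (1ULL << total_nodes))
            printf("mask 0x%x is valid for %u nodes\n",
                   node_mask, total_nodes);
        return 0;
    }
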
net_sched FIFO qdisc:

@@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
        u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;

        if (sch->ops == &bfifo_qdisc_ops)
-           limit *= qdisc_dev(sch)->mtu;
+           limit *= psched_mtu(qdisc_dev(sch));

        q->limit = limit;
    } else {

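The fifo change swaps the raw device MTU for the scheduler's packet-size estimate; as far as I recall, psched_mtu() is roughly the MTU plus the link-layer header length, so a byte-mode FIFO sized from it can still hold tx_queue_len full frames. A hedged sketch of the difference (the header length is illustrative):

    #include <stdio.h>

    /* Rough shape of psched_mtu(): MTU plus link-layer header length. */
    static unsigned int psched_mtu_sketch(unsigned int mtu, unsigned int hard_header_len)
    {
        return mtu + hard_header_len;
    }

    int main(void)
    {
        unsigned int tx_queue_len = 1000, mtu = 1500, hh = 14; /* Ethernet */

        printf("old bfifo default: %u bytes\n", tx_queue_len * mtu);
        printf("new bfifo default: %u bytes\n",
               tx_queue_len * psched_mtu_sketch(mtu, hh));
        return 0;
    }
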
WiMAX messaging (op-msg):

@@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
    }
    result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
    if (result < 0) {
-       dev_err(dev, "no memory to add payload in attribute\n");
+       dev_err(dev, "no memory to add payload (msg %p size %zu) in "
+           "attribute: %d\n", msg, size, result);
        goto error_nla_put;
    }
    genlmsg_end(skb, genl_msg);

@@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
    struct sk_buff *skb;

    skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
-   if (skb == NULL)
-       goto error_msg_new;
-   result = wimax_msg_send(wimax_dev, skb);
-error_msg_new:
+   if (IS_ERR(skb))
+       result = PTR_ERR(skb);
+   else
+       result = wimax_msg_send(wimax_dev, skb);
    return result;
}
EXPORT_SYMBOL_GPL(wimax_msg);

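The wimax_msg() hunk exists because wimax_msg_alloc() reports failure through ERR_PTR(), not NULL, so a NULL test lets encoded error pointers flow on into wimax_msg_send(). A short sketch of the ERR_PTR convention using the kernel's err.h helpers (the allocator shown is illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>

    /* Illustrative allocator in the same style as wimax_msg_alloc():
     * failures come back encoded as ERR_PTR(-errno), never as NULL. */
    static void *alloc_or_errptr(size_t size)
    {
        void *p = kzalloc(size, GFP_KERNEL);

        return p ? p : ERR_PTR(-ENOMEM);
    }

    static int use_it(void)
    {
        void *obj = alloc_or_errptr(128);

        if (IS_ERR(obj))            /* a NULL check would miss this */
            return PTR_ERR(obj);    /* recover the negative errno */

        /* ... use obj ... */
        kfree(obj);
        return 0;
    }
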
WiMAX device state handling:

@@ -338,8 +338,21 @@ out:
 */
void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
{
+   /*
+    * A driver cannot take the wimax_dev out of the
+    * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
+    * the wimax_dev's state is still NULL, we ignore any request
+    * to change its state because it means it hasn't been yet
+    * registered.
+    *
+    * There is no need to complain about it, as routines that
+    * call this might be shared from different code paths that
+    * are called before or after wimax_dev_add() has done its
+    * job.
+    */
    mutex_lock(&wimax_dev->mutex);
-   __wimax_state_change(wimax_dev, new_state);
+   if (wimax_dev->state > __WIMAX_ST_NULL)
+       __wimax_state_change(wimax_dev, new_state);
    mutex_unlock(&wimax_dev->mutex);
    return;
}

@@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get);
void wimax_dev_init(struct wimax_dev *wimax_dev)
{
    INIT_LIST_HEAD(&wimax_dev->id_table_node);
-   __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
+   __wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
    mutex_init(&wimax_dev->mutex);
    mutex_init(&wimax_dev->mutex_reset);
}

cfg80211 regulatory:

@@ -906,6 +906,7 @@ EXPORT_SYMBOL(freq_reg_info);
int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
          const struct ieee80211_reg_rule **reg_rule)
{
+   assert_cfg80211_lock();
    return freq_reg_info_regd(wiphy, center_freq,
                  bandwidth, reg_rule, NULL);
}

@@ -1134,7 +1135,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
    if (is_world_regdom(cfg80211_regdomain->alpha2) ||
        (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
        return true;
-   if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+   if (last_request &&
+       last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
        wiphy->custom_regulatory)
        return true;
    return false;

@@ -1143,6 +1145,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
/* Reap the advantages of previously found beacons */
static void reg_process_beacons(struct wiphy *wiphy)
{
+   /*
+    * Means we are just firing up cfg80211, so no beacons would
+    * have been processed yet.
+    */
+   if (!last_request)
+       return;
    if (!reg_is_world_roaming(wiphy))
        return;
    wiphy_update_beacon_reg(wiphy);

@@ -1177,6 +1185,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
    struct ieee80211_supported_band *sband;
    struct ieee80211_channel *chan;

+   assert_cfg80211_lock();
+
    sband = wiphy->bands[band];
    BUG_ON(chan_idx >= sband->n_channels);
    chan = &sband->channels[chan_idx];

@@ -1215,10 +1225,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
                   const struct ieee80211_regdomain *regd)
{
    enum ieee80211_band band;

+   mutex_lock(&cfg80211_mutex);
    for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
        if (wiphy->bands[band])
            handle_band_custom(wiphy, band, regd);
    }
+   mutex_unlock(&cfg80211_mutex);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);

@@ -1424,7 +1437,7 @@ new_request:
    return call_crda(last_request->alpha2);
}

-/* This currently only processes user and driver regulatory hints */
+/* This processes *all* regulatory hints */
static void reg_process_hint(struct regulatory_request *reg_request)
{
    int r = 0;

|
|||
memcpy(ies, res->pub.information_elements, ielen);
|
||||
found->ies_allocated = true;
|
||||
found->pub.information_elements = ies;
|
||||
found->pub.len_information_elements = ielen;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|