Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (29 commits)
  cxgb3: Fix crash caused by stashing wrong netdev_queue
  ixgbe: Fix coexistence of FCoE and Flow Director in 82599
  memory barrier: adding smp_mb__after_lock
  net: adding memory barrier to the poll and receive callbacks
  netpoll: Fix carrier detection for drivers that are using phylib
  includecheck fix: include/linux, rfkill.h
  p54: tx refused but queue active
  Atheros Kconfig needs to be dependent on WLAN_80211
  mac80211: fix docbook
  mac80211_hwsim: avoid NULL access
  ssb: Add support for 4318E
  b43: Add support for 4318E
  zd1211rw: adding SONY IFU-WLM2 (054c:0257) as a zd1211b device
  zd1211rw: 07b8:6001 is a ZD1211B
  r6040: bump driver version to 0.24 and date to 08 July 2009
  r6040: restore MIER register correctly when IRQ line is shared
  ipv4: Fix fib_trie rebalancing, part 4 (root thresholds)
  davinci_emac: fix kernel oops when changing MAC address while interface is down
  igb: set lan id prior to configuring phy
  mac80211: minstrel: avoid accessing negative indices in rix_to_ndx()
  ...
commit e864561c12
40 changed files with 192 additions and 60 deletions
@@ -184,8 +184,6 @@ usage should require reading the full document.
!Finclude/net/mac80211.h ieee80211_ctstoself_get
!Finclude/net/mac80211.h ieee80211_ctstoself_duration
!Finclude/net/mac80211.h ieee80211_generic_frame_duration
!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
!Finclude/net/mac80211.h ieee80211_hdrlen
!Finclude/net/mac80211.h ieee80211_wake_queue
!Finclude/net/mac80211.h ieee80211_stop_queue
!Finclude/net/mac80211.h ieee80211_wake_queues
@@ -302,4 +302,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */
@@ -642,8 +642,7 @@ static int setup_sge_qsets(struct adapter *adap)
struct port_info *pi = netdev_priv(dev);

pi->qs = &adap->sge.qs[pi->first_qset];
for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
++j, ++qset_idx) {
for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :
@@ -1820,11 +1820,19 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;

if (!is_valid_ether_addr(sa->sa_data))
return -EINVAL;

/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);

/* If the interface is down - rxch is NULL. */
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
}

if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
@@ -46,12 +46,12 @@

#else

#define FEC_ECNTRL; 0x000 /* Ethernet control reg */
#define FEC_IEVENT; 0x004 /* Interrupt even reg */
#define FEC_IMASK; 0x008 /* Interrupt mask reg */
#define FEC_IVEC; 0x00c /* Interrupt vec status reg */
#define FEC_R_DES_ACTIVE; 0x010 /* Receive descriptor reg */
#define FEC_X_DES_ACTIVE; 0x01c /* Transmit descriptor reg */
#define FEC_ECNTRL 0x000 /* Ethernet control reg */
#define FEC_IEVENT 0x004 /* Interrupt even reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_IVEC 0x00c /* Interrupt vec status reg */
#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
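Side note (not part of the patch): the hunk above both drops the stray semicolons from the register #defines and corrects the FEC_X_DES_ACTIVE offset. A semicolon glued to a macro name ends up inside the macro body, so the constant can no longer be used in address arithmetic. A minimal standalone C illustration, with hypothetical REG_* names rather than the driver's:

#include <stdio.h>

#define REG_IMASK_BROKEN; 0x008	/* body becomes "; 0x008"; the compiler also warns about the missing space */
#define REG_IMASK         0x008	/* corrected form */

int main(void)
{
	unsigned long base = 0x1000;

	/* printf("%#lx\n", base + REG_IMASK_BROKEN); */
	/*   ^ would expand to "base + ; 0x008" and fail to compile */
	printf("%#lx\n", base + REG_IMASK);	/* prints 0x1008 */
	return 0;
}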
@@ -190,6 +190,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.write_reg = igb_write_phy_reg_igp;
}

/* set lan id */
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;

/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
@@ -138,6 +138,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->hw.fc.requested_mode = ixgbe_fc_none;
}
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
}
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))

@@ -154,6 +158,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
@@ -3130,7 +3130,11 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
ixgbe_set_rss_queues(adapter);
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
else
ixgbe_set_rss_queues(adapter);
}
/* adding FCoE rx rings to the end */
f->mask = adapter->num_rx_queues;

@@ -3388,7 +3392,12 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
ixgbe_cache_ring_rss(adapter);
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);

fcoe_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_i++)

@@ -5578,12 +5587,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_FCOE_CRC;
netdev->features |= NETIF_F_FSO;
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
DPRINTK(DRV, INFO, "FCoE enabled, "
"disabling Flow Director\n");
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &=
~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
} else {
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
}
@@ -49,8 +49,8 @@
#include <asm/processor.h>

#define DRV_NAME "r6040"
#define DRV_VERSION "0.23"
#define DRV_RELDATE "05May2009"
#define DRV_VERSION "0.24"
#define DRV_RELDATE "08Jul2009"

/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */

@@ -704,8 +704,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
/* Read MISR status and clear */
status = ioread16(ioaddr + MISR);

if (status == 0x0000 || status == 0xffff)
if (status == 0x0000 || status == 0xffff) {
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
return IRQ_NONE;
}

/* RX interrupt request */
if (status & RX_INTS) {
@@ -1,5 +1,6 @@
config ATH_COMMON
tristate "Atheros Wireless Cards"
depends on WLAN_80211
depends on ATH5K || ATH9K || AR9170_USB

source "drivers/net/wireless/ath/ath5k/Kconfig"
@@ -355,7 +355,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}

if (bf_next == NULL) {
INIT_LIST_HEAD(&bf_head);
/*
* Make sure the last desc is reclaimed if it
* not a holding desc.
*/
if (!bf_last->bf_stale)
list_move_tail(&bf->list, &bf_head);
else
INIT_LIST_HEAD(&bf_head);
} else {
ASSERT(!list_empty(bf_q));
list_move_tail(&bf->list, &bf_head);
@@ -648,6 +648,7 @@ struct b43_wl {
u8 nr_devs;

bool radiotap_enabled;
bool radio_enabled;

/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */
@@ -3497,8 +3497,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
if (phy->ops->set_rx_antenna)
phy->ops->set_rx_antenna(dev, antenna);

if (!!conf->radio_enabled != phy->radio_on) {
if (conf->radio_enabled) {
if (wl->radio_enabled != phy->radio_on) {
if (wl->radio_enabled) {
b43_software_rfkill(dev, false);
b43info(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable) {

@@ -4339,6 +4339,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
wl->radio_enabled = 1;

mutex_lock(&wl->mutex);

@@ -4378,6 +4379,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
if (b43_status(dev) >= B43_STAT_STARTED)
b43_wireless_core_stop(dev);
b43_wireless_core_exit(dev);
wl->radio_enabled = 0;
mutex_unlock(&wl->mutex);

cancel_work_sync(&(wl->txpower_adjust_work));

@@ -4560,6 +4562,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
B43_WARN_ON(1);

dev->phy.gmode = have_2ghz_phy;
dev->phy.radio_on = 1;
tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
b43_wireless_core_reset(dev, tmp);
@@ -35,6 +35,7 @@

static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = {
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
PCMCIA_DEVICE_NULL,
};
@@ -607,6 +607,7 @@ struct b43legacy_wl {
u8 nr_devs;

bool radiotap_enabled;
bool radio_enabled;

/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */
@@ -2689,8 +2689,8 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
/* Antennas for RX and management frame TX. */
b43legacy_mgmtframe_txantenna(dev, antenna_tx);

if (!!conf->radio_enabled != phy->radio_on) {
if (conf->radio_enabled) {
if (wl->radio_enabled != phy->radio_on) {
if (wl->radio_enabled) {
b43legacy_radio_turn_on(dev);
b43legacyinfo(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable)

@@ -3441,6 +3441,7 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
wl->radio_enabled = 1;

mutex_lock(&wl->mutex);

@@ -3479,6 +3480,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
wl->radio_enabled = 0;
mutex_unlock(&wl->mutex);
}

@@ -3620,6 +3622,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;

dev->phy.gmode = (have_gphy || have_bphy);
dev->phy.radio_on = 1;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
@@ -4,6 +4,15 @@ config IWM
depends on CFG80211
select WIRELESS_EXT
select FW_LOADER
help
The Intel Wireless Multicomm 3200 hardware is a combo
card with GPS, Bluetooth, WiMax and 802.11 radios. It
runs over SDIO and is typically found on Moorestown
based platform. This driver takes care of the 802.11
part, which is a fullmac one.

If you choose to build it as a module, it'll be called
iwmc3200wifi.ko.

config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"
@@ -418,6 +418,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
continue;

if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
!data->channel || !data2->channel ||
data->channel->center_freq != data2->channel->center_freq ||
!(data->group & data2->group))
continue;
@@ -912,13 +912,14 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
}

__skb_unlink(entry, &priv->tx_queue);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

frame_len = entry->len;
entry_hdr = (struct p54_hdr *) entry->data;
entry_data = (struct p54_tx_data *) entry_hdr->data;
priv->tx_stats[entry_data->hw_queue].len--;
if (priv->tx_stats[entry_data->hw_queue].len)
priv->tx_stats[entry_data->hw_queue].len--;
priv->stats.dot11ACKFailureCount += payload->tries - 1;
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

/*
* Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
@@ -38,7 +38,6 @@ static struct usb_device_id usb_ids[] = {
/* ZD1211 */
{ USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },

@@ -61,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },

@@ -87,6 +87,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
@@ -171,7 +171,7 @@ static int hp_wmi_tablet_state(void)
static int hp_wmi_set_block(void *data, bool blocked)
{
unsigned long b = (unsigned long) data;
int query = BIT(b + 8) | ((!!blocked) << b);
int query = BIT(b + 8) | ((!blocked) << b);

return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
}
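Side note (not part of the patch): the one-character change above flips which rfkill state sets the low bit of the WMI query word. Reading bit b as the requested "radio on" state is an assumption inferred from the fix; under that reading, a standalone sketch of the before/after values is:

#include <stdio.h>
#include <stdbool.h>

#define BIT(n) (1UL << (n))

/* old behaviour: state bit followed "blocked" */
static unsigned long query_old(unsigned long b, bool blocked)
{
	return BIT(b + 8) | ((unsigned long)(!!blocked) << b);
}

/* fixed behaviour: state bit follows "not blocked" */
static unsigned long query_new(unsigned long b, bool blocked)
{
	return BIT(b + 8) | ((unsigned long)(!blocked) << b);
}

int main(void)
{
	/* rfkill asks to block (switch off) radio 0 */
	printf("old: %#lx, new: %#lx\n", query_old(0, true), query_new(0, true));
	/* old: 0x101 (state bit still set), new: 0x100 (state bit cleared) */
	return 0;
}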
@@ -678,7 +678,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
sprom->board_rev = tuple.TupleData[1];
break;
case SSB_PCMCIA_CIS_PA:
GOTO_ERROR_ON(tuple.TupleDataLen != 9,
GOTO_ERROR_ON((tuple.TupleDataLen != 9) &&
(tuple.TupleDataLen != 10),
"pa tpl size");
sprom->pa0b0 = tuple.TupleData[1] |
((u16)tuple.TupleData[2] << 8);

@@ -718,7 +719,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1];
break;
case SSB_PCMCIA_CIS_BFLAGS:
GOTO_ERROR_ON(tuple.TupleDataLen != 3,
GOTO_ERROR_ON((tuple.TupleDataLen != 3) &&
(tuple.TupleDataLen != 5),
"bfl tpl size");
sprom->boardflags_lo = tuple.TupleData[1] |
((u16)tuple.TupleData[2] << 8);
@@ -99,7 +99,6 @@ enum rfkill_user_states {
#undef RFKILL_STATE_UNBLOCKED
#undef RFKILL_STATE_HARD_BLOCKED

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -132,6 +132,11 @@ do { \
#endif /*__raw_spin_is_contended*/
#endif

/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
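Side note (not part of the patch): together with the x86 hunk earlier, this is a standard compile-time override. An architecture whose lock already acts as a full barrier defines ARCH_HAS_SMP_MB_AFTER_LOCK and supplies a no-op; everyone else gets the smp_mb() fallback. A userspace sketch of the same pattern, with __sync_synchronize() merely standing in for smp_mb():

#include <stdio.h>

#define ARCH_HAS_SMP_MB_AFTER_LOCK	/* comment out to exercise the generic fallback */

#ifdef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { }	/* the lock was already a full barrier */
#else
static inline void smp_mb__after_lock(void) { __sync_synchronize(); }
#endif

int main(void)
{
	smp_mb__after_lock();
	puts("smp_mb__after_lock() resolved at compile time");
	return 0;
}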
@@ -54,6 +54,7 @@

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <asm/atomic.h>
#include <net/dst.h>

@@ -1241,6 +1242,74 @@ static inline int sk_has_allocations(const struct sock *sk)
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * sk_has_sleeper - check if there are any waiting processes
 * @sk: socket
 *
 * Returns true if socket has waiting processes
 *
 * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider following tcp code paths:
 *
 * CPU1                             CPU2
 *
 * sys_select                       receive packet
 *   ...                            ...
 *   __add_wait_queue               update tp->rcv_nxt
 *   ...                            ...
 *   tp->rcv_nxt check              sock_def_readable
 *   ...                            {
 *   schedule                       ...
 *                                  if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 *                                      wake_up_interruptible(sk->sk_sleep)
 *                                  ...
 *                                  }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1
 * could then endup calling schedule and sleep forever if there are no more
 * data on the socket.
 *
 * The sk_has_sleeper is always called right after a call to read_lock, so we
 * can use smp_mb__after_lock barrier.
 */
static inline int sk_has_sleeper(struct sock *sk)
{
/*
 * We need to be sure we are in sync with the
 * add_wait_queue modifications to the wait queue.
 *
 * This memory barrier is paired in the sock_poll_wait.
 */
smp_mb__after_lock();
return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp: file
 * @wait_address: socket wait queue
 * @p: poll_table
 *
 * See the comments in the sk_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
{
if (p && wait_address) {
poll_wait(filp, wait_address, p);
/*
 * We need to be sure we are in sync with the
 * socket flags modification.
 *
 * This memory barrier is paired in the sk_has_sleeper.
 */
smp_mb();
}
}

/*
 * Queue a received datagram if it will fit. Stream and sequenced
 * protocols can't normally use this as they need to fit buffers in
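Side note (not part of the patch): the comment block above describes a classic lost-wakeup (store-buffering) race. Below is a self-contained userspace analogue of the pairing that sk_has_sleeper()/sock_poll_wait() establish, using C11 atomics and pthreads; every name in it is illustrative, none of it is kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int waiting;	/* analogue of a non-empty sk->sk_sleep wait queue */
static atomic_int data_ready;	/* analogue of tp->rcv_nxt having advanced */

static void *poller(void *arg)	/* CPU1 in the comment above */
{
	(void)arg;
	atomic_store_explicit(&waiting, 1, memory_order_relaxed);	/* __add_wait_queue */
	atomic_thread_fence(memory_order_seq_cst);			/* smp_mb() in sock_poll_wait() */
	if (!atomic_load_explicit(&data_ready, memory_order_relaxed))
		puts("poller: no data, would sleep until woken");
	else
		puts("poller: data already there, no sleep needed");
	return NULL;
}

static void *producer(void *arg)	/* CPU2 in the comment above */
{
	(void)arg;
	atomic_store_explicit(&data_ready, 1, memory_order_relaxed);	/* update tp->rcv_nxt */
	atomic_thread_fence(memory_order_seq_cst);			/* smp_mb__after_lock() in sk_has_sleeper() */
	if (atomic_load_explicit(&waiting, memory_order_relaxed))
		puts("producer: saw a sleeper, would wake it");
	else
		puts("producer: nobody waiting, no wakeup needed");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, poller, NULL);
	pthread_create(&b, NULL, producer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/*
	 * With both fences in place at least one thread is guaranteed to see
	 * the other's store, so "would sleep" and "no wakeup needed" can never
	 * be printed together; drop the fences and that combination - the
	 * missed wakeup described above - becomes possible.
	 */
	return 0;
}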
@@ -92,7 +92,7 @@ static void vcc_sock_destruct(struct sock *sk)
static void vcc_def_wakeup(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up(sk->sk_sleep);
read_unlock(&sk->sk_callback_lock);
}

@@ -110,7 +110,7 @@ static void vcc_write_space(struct sock *sk)
read_lock(&sk->sk_callback_lock);

if (vcc_writable(sk)) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible(sk->sk_sleep);

sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

@@ -594,7 +594,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
unsigned int mask;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;

vcc = ATM_SD(sock);
@@ -712,7 +712,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;

/* exceptional events? */
@@ -740,7 +740,7 @@ int netpoll_setup(struct netpoll *np)
np->name);
break;
}
cond_resched();
msleep(1);
}

/* If carrier appears to come up instantly, we don't
@@ -1715,7 +1715,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
static void sock_def_wakeup(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_all(sk->sk_sleep);
read_unlock(&sk->sk_callback_lock);
}

@@ -1723,7 +1723,7 @@ static void sock_def_wakeup(struct sock *sk)
static void sock_def_error_report(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
read_unlock(&sk->sk_callback_lock);

@@ -1732,7 +1732,7 @@ static void sock_def_error_report(struct sock *sk)
static void sock_def_readable(struct sock *sk, int len)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);

@@ -1747,7 +1747,7 @@ static void sock_def_write_space(struct sock *sk)
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
POLLWRNORM | POLLWRBAND);
@@ -196,7 +196,7 @@ void dccp_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);

if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible(sk->sk_sleep);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
@@ -311,7 +311,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
@@ -316,8 +316,8 @@ static inline void check_tnode(const struct tnode *tn)

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 8;
static const int inflate_threshold_root = 15;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 25;


static void __alias_free_mem(struct rcu_head *head)
@@ -339,7 +339,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
struct tcp_sock *tp = tcp_sk(sk);

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
@@ -306,7 +306,7 @@ static inline int iucv_below_msglim(struct sock *sk)
static void iucv_sock_wake_msglim(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_all(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
read_unlock(&sk->sk_callback_lock);

@@ -1256,7 +1256,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask = 0;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);

if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
@@ -637,7 +637,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;

preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
return;
@@ -66,7 +66,7 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
for (i = rix; i >= 0; i--)
if (mi->r[i].rix == rix)
break;
WARN_ON(mi->r[i].rix != rix);
WARN_ON(i < 0);
return i;
}

@@ -181,6 +181,9 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
break;

ndx = rix_to_ndx(mi, ar[i].idx);
if (ndx < 0)
continue;

mi->r[ndx].attempts += ar[i].count;

if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
@@ -63,7 +63,7 @@ static void rxrpc_write_space(struct sock *sk)
_enter("%p", sk);
read_lock(&sk->sk_callback_lock);
if (rxrpc_writable(sk)) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}

@@ -588,7 +588,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;

/* the socket is readable if there are any messages waiting on the Rx
@@ -315,7 +315,7 @@ static void unix_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (unix_writable(sk)) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_sync(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}

@@ -1985,7 +1985,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
struct sock *sk = sock->sk;
unsigned int mask;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;

/* exceptional events? */

@@ -2022,7 +2022,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk, *other;
unsigned int mask, writable;

poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;

/* exceptional events? */

@@ -2053,7 +2053,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
other = unix_peer_get(sk);
if (other) {
if (unix_peer(other) != sk) {
poll_wait(file, &unix_sk(other)->peer_wait,
sock_poll_wait(file, &unix_sk(other)->peer_wait,
wait);
if (unix_recvq_full(other))
writable = 0;
@@ -447,6 +447,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)

rdev = __cfg80211_drv_from_info(info);
if (IS_ERR(rdev)) {
mutex_unlock(&cfg80211_mutex);
result = PTR_ERR(rdev);
goto unlock;
}
@@ -366,7 +366,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found = rb_find_bss(dev, res);

if (found) {
kref_get(&found->ref);
found->pub.beacon_interval = res->pub.beacon_interval;
found->pub.tsf = res->pub.tsf;
found->pub.signal = res->pub.signal;