commit 5802b140ed
Author: David S. Miller
Date:   2009-04-23 04:08:24 -07:00

    Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

    Conflicts:
    	net/iucv/af_iucv.c

 28 files changed, 298 insertions(+), 189 deletions(-)


@@ -1383,6 +1383,11 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
tp->cur_rx = 0;
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@@ -1390,8 +1395,6 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W32 (RxConfig, tp->rx_config);
RTL_W32 (TxConfig, rtl8139_tx_config);
tp->cur_rx = 0;
rtl_check_media (dev, 1);
if (tp->chipset >= CH_8139B) {
@@ -1406,9 +1409,6 @@ static void rtl8139_hw_start (struct net_device *dev)
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
/* init Tx buffer DMA addresses */
for (i = 0; i < NUM_TX_DESC; i++)
RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));


@@ -181,7 +181,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) {
err = -ENOMEM;
goto err_close_nic;
goto err_mr;
}
/* At this stage all non-port specific tasks are complete:
@@ -214,9 +214,8 @@ err_free_netdev:
flush_workqueue(mdev->workqueue);
/* Stop event queue before we drop down to release shared SW state */
err_close_nic:
destroy_workqueue(mdev->workqueue);
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err_uar:


@@ -348,11 +348,9 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (netif_msg_timer(priv))
mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
if (netif_carrier_ok(dev)) {
priv->port_stats.tx_timeout++;
mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
}
priv->port_stats.tx_timeout++;
mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
}
@@ -761,9 +759,14 @@ static void mlx4_en_restart(struct work_struct *work)
struct net_device *dev = priv->dev;
mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mlx4_en_stop_port(dev);
if (mlx4_en_start_port(dev))
mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev);
if (mlx4_en_start_port(dev))
mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
}
@@ -1054,7 +1057,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
* Set driver features
*/
dev->features |= NETIF_F_SG;
dev->features |= NETIF_F_HW_CSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |


@@ -151,6 +151,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
int i;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -165,38 +166,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
be32_to_cpu(mlx4_en_stats->RDROP);
stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
be64_to_cpu(mlx4_en_stats->ROCT_novlan);
stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
stats->rx_packets = 0;
stats->rx_bytes = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i].packets;
stats->rx_bytes += priv->rx_ring[i].bytes;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
for (i = 0; i <= priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i].packets;
stats->tx_bytes += priv->tx_ring[i].bytes;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RdropLength) +

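The replacement above derives the rx/tx totals from the driver's per-ring
software counters instead of the firmware's per-priority mailbox fields.
A minimal, self-contained sketch of that aggregation pattern (the struct
and names here are illustrative, not the mlx4 driver's):

#include <stdint.h>
#include <stdio.h>

struct ring_stats {            /* per-ring counters kept by the driver */
	uint64_t packets;
	uint64_t bytes;
};

/* Sum software counters across all rings into the netdev totals. */
static void aggregate(const struct ring_stats *rings, int n,
		      uint64_t *packets, uint64_t *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;
	for (i = 0; i < n; i++) {      /* note: '<', one slot per ring */
		*packets += rings[i].packets;
		*bytes += rings[i].bytes;
	}
}

int main(void)
{
	struct ring_stats rx[2] = { { 10, 1500 }, { 5, 600 } };
	uint64_t pkts, bytes;

	aggregate(rx, 2, &pkts, &bytes);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}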

@@ -94,3 +94,9 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
vunmap(buf->direct.buf);
}
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
}


@@ -436,8 +436,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize page allocators */
err = mlx4_en_init_allocator(priv, ring);
if (err) {
mlx4_err(mdev, "Failed initializing ring allocator\n");
goto err_allocator;
mlx4_err(mdev, "Failed initializing ring allocator\n");
ring_ind--;
goto err_allocator;
}
/* Fill Rx buffers */
@@ -467,6 +468,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->wqres.db.dma, &ring->srq);
if (err){
mlx4_err(mdev, "Failed to allocate srq\n");
ring_ind--;
goto err_srq;
}
ring->srq.event = mlx4_en_srq_event;
@@ -926,12 +928,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
}
}
static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
}
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
int qpn, int srqn, int cqn,
enum mlx4_qp_state *state,
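
The first two hunks in this file add a ring_ind-- before the error gotos.
The rule they follow: when iteration ring_ind fails before completing,
cleanup must start from the last ring that initialized fully, so the index
is stepped back before jumping to the unwind label. A self-contained
sketch of the pattern (init_ring/free_ring are hypothetical stand-ins):

#include <stdio.h>

#define NR_RINGS 4

static int init_ring(int i)  { return i == 2 ? -1 : 0; } /* fail on ring 2 */
static void free_ring(int i) { printf("free ring %d\n", i); }

static int activate_rings(void)
{
	int i;

	for (i = 0; i < NR_RINGS; i++) {
		if (init_ring(i)) {
			i--;            /* ring i never initialized: unwind from i-1 */
			goto err;
		}
	}
	return 0;

err:
	while (i >= 0)
		free_ring(i--);         /* tears down rings 1 and 0 only */
	return -1;
}

int main(void)
{
	return activate_rings() ? 1 : 0;
}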


@@ -112,6 +112,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
ring->qp.event = mlx4_en_sqp_event;
return 0;


@@ -538,6 +538,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int srqn,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);


@@ -1758,7 +1758,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "LA-PCM.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),


@@ -1394,7 +1394,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_RPM;
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;


@@ -1502,7 +1502,6 @@ static const struct net_device_ops atmel_netdev_ops = {
.ndo_set_mac_address = atmel_set_mac_address,
.ndo_start_xmit = start_tx,
.ndo_do_ioctl = atmel_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};


@@ -1192,7 +1192,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
return -ENOMEM;
}
} else
iwl_rx_queue_reset(priv, rxq);
iwl3945_rx_queue_reset(priv, rxq);
iwl3945_rx_replenish(priv);


@@ -215,6 +215,7 @@ extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq, int count, u32 id);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
const void *data);


@@ -976,11 +976,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
dma_sync_single_range_for_cpu(
&priv->pci_dev->dev, rxb->real_dma_addr,
rxb->aligned_dma_addr - rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@@ -1031,9 +1029,6 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);


@@ -223,7 +223,7 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
#define CSR_EEPROM_GP_VALID_MSK (0x00000007)
#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)


@@ -360,12 +360,16 @@ struct iwl_host_cmd {
/**
* struct iwl_rx_queue - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
* @rx_free: list of free SKBs for use
* @rx_used: List of Rx buffers with no SKB
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
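
As the NOTE says, rx_free and rx_used form a recycling FIFO for
iwl_rx_mem_buffers: buffers without an skb wait on rx_used, and replenish
attaches an skb and moves them to rx_free for the device. A toy model of
that flow, assuming only the semantics in the kernel-doc above (index
stacks stand in for the kernel's list_heads, and POOL is illustrative):

#include <stdio.h>

#define POOL 4

static int rx_used[POOL], n_used;   /* buffers with no skb */
static int rx_free[POOL], n_free;   /* buffers ready for the device */

static void queue_reset(void)
{
	int i;

	n_used = n_free = 0;
	for (i = 0; i < POOL; i++)      /* every buffer back to rx_used */
		rx_used[n_used++] = i;
}

static void replenish(void)
{
	while (n_used > 0)              /* "allocate" an skb per buffer */
		rx_free[n_free++] = rx_used[--n_used];
}

int main(void)
{
	queue_reset();
	replenish();
	printf("free=%d used=%d\n", n_free, n_used);
	return 0;
}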


@@ -799,6 +799,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
if (info->control.hw_key)
iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
/* TODO need this for burst mode later on */
iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -819,21 +835,30 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
else
len_org = 0;
/* Tell NIC about any 2-byte padding after MAC header */
if (len_org)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
out_cmd, sizeof(struct iwl_cmd),
&out_cmd->hdr, len,
PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
pci_unmap_len_set(&out_cmd->meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
txcmd_phys += offsetof(struct iwl_cmd, hdr);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, len, 1, 0);
if (info->control.hw_key)
iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -846,41 +871,29 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
0, 0);
}
/* Tell NIC about any 2-byte padding after MAC header */
if (len_org)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
/* TODO need this for burst mode later on */
iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
offsetof(struct iwl_tx_cmd, scratch);
len = sizeof(struct iwl_tx_cmd) +
sizeof(struct iwl_cmd_header) + hdr_len;
/* take back ownership of DMA buffer to enable update */
pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
len, PCI_DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
len, PCI_DMA_BIDIRECTIONAL);
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -968,18 +981,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
INDEX_TO_SEQ(q->write_ptr));
if (out_cmd->meta.flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
len = (idx == TFD_CMD_SLOTS) ?
IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
phys_addr = pci_map_single(priv->pci_dev, out_cmd,
len, PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
pci_unmap_len_set(&out_cmd->meta, len, len);
phys_addr += offsetof(struct iwl_cmd, hdr);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
U32_PAD(cmd->len));
#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
@@ -1007,6 +1011,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Set up entry in queue's byte count circular buffer */
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
fix_size, PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
pci_unmap_len_set(&out_cmd->meta, len, fix_size);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
U32_PAD(cmd->len));
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
ret = iwl_txq_update_write_ptr(priv, txq);


@@ -972,7 +972,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
int txq_id = skb_get_queue_mapping(skb);
u16 len, idx, len_org, hdr_len;
u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
u8 id;
u8 unicast;
u8 sta_id;
@@ -1074,6 +1074,40 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx->hdr, hdr, hdr_len);
if (info->control.hw_key)
iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
/* TODO need this for burst mode later on */
iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx->len = cpu_to_le16(len);
tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
ieee80211_hdrlen(fc));
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -1096,22 +1130,18 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
out_cmd, sizeof(struct iwl_cmd),
PCI_DMA_TODEVICE);
txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
len, PCI_DMA_TODEVICE);
/* we do not map meta data ... so we can safely access address to
* provide to unmap command*/
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
txcmd_phys += offsetof(struct iwl_cmd, hdr);
pci_unmap_len_set(&out_cmd->meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, len, 1, 0);
if (info->control.hw_key)
iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -1124,32 +1154,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
0, U32_PAD(len));
}
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx->len = cpu_to_le16(len);
/* TODO need this for burst mode later on */
iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
ieee80211_hdrlen(fc));
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -1661,6 +1665,37 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
spin_unlock_irqrestore(&rxq->lock, flags);
}
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
unsigned long flags;
int i;
spin_lock_irqsave(&rxq->lock, flags);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
/* Fill the rx_used queue with _all_ of the Rx buffers */
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
}
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl3945_rx_queue_reset);
/*
* this should be called while priv->lock is locked
*/
@@ -1685,6 +1720,34 @@ void iwl3945_rx_replenish(void *data)
spin_unlock_irqrestore(&priv->lock, flags);
}
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
* This free routine walks the list of POOL entries and if SKB is set to
* non NULL it is unmapped and freed
*/
static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
}
pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl3945_rx_queue_free);
/* Convert linear signal-to-noise ratio into dB */
static u8 ratio2dB[100] = {
/* 0 1 2 3 4 5 6 7 8 9 */
@@ -1802,9 +1865,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@@ -1852,9 +1915,6 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
@@ -5196,12 +5256,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
iwl_rfkill_unregister(priv);
cancel_delayed_work(&priv->rfkill_poll);
cancel_delayed_work_sync(&priv->rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
iwl_rx_queue_free(priv, &priv->rxq);
iwl3945_rx_queue_free(priv, &priv->rxq);
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);


@@ -893,8 +893,7 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
rx_desc->next_rx_desc_phys_addr =
cpu_to_le32(rxq->rx_desc_dma
+ nexti * sizeof(*rx_desc));
rx_desc->rx_ctrl =
cpu_to_le32(MWL8K_RX_CTRL_OWNED_BY_HOST);
rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
}
return 0;


@@ -46,6 +46,7 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
e100/d102e_ucode.bin
fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis
fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
advansys/3550.bin advansys/38C0800.bin


@@ -576,6 +576,16 @@ Found in hex form in kernel source.
--------------------------------------------------------------------------
Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
File: cis/LA-PCM.cis
Licence: GPL
Originally developed by the pcmcia-cs project
--------------------------------------------------------------------------
Driver: PCMCIA_SMC91C92 - SMC 91Cxx PCMCIA
File: ositech/Xilinx7OD.bin


@@ -0,0 +1,20 @@
:100000000105D4F953E9FF17035338FF20040FC04B
:1000100002002102060315390401416C6C69656414
:100020002054656C657369732C4B2E4B00457468C6
:1000300065726E6574204C414E20436172640043CA
:10004000656E747265434F4D004C412D50434D0019
:10005000FF1A0602100000020B1B08810108E06075
:1000600000021F1B08820108E06020021F1B08839A
:100070000108E06040021F1B08840108E060600284
:100080001F1B08850108E06080021F1B088601080D
:10009000E060A0021F1B08870108E060C0021F1B70
:1000A00008880108E060E0021F1B08890108E06081
:1000B00000031F1B088A0108E06020031F1B088B38
:1000C0000108E06040031F1B088C0108E06060032A
:1000D0001F1B088D0108E06080031F1B088E0108AC
:1000E000E060A0031F1B088F0108E060C0031F1B16
:0D00F00008900108E060E0031F1400FF000D
:00000001FF
#
# Replacement CIS for Allied Telesis LA-PCM
#


@@ -215,6 +215,7 @@ static void iucv_sock_close(struct sock *sk)
err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
}
case IUCV_CLOSING: /* fall through */
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
@@ -269,6 +270,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
iucv_sk(sk)->send_tag = 0;
iucv_sk(sk)->flags = 0;
iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
iucv_sk(sk)->path = NULL;
memset(&iucv_sk(sk)->src_user_id , 0, 32);
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -979,6 +982,10 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
/* receive/dequeue next skb:
* the function understands MSG_PEEK and, thus, does not dequeue skb */
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -1046,9 +1053,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
iucv_process_message_q(sk);
spin_unlock_bh(&iucv->message_q.lock);
}
} else
skb_queue_head(&sk->sk_receive_queue, skb);
}
done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
@@ -1125,6 +1130,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_SEVERED:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
@@ -1398,8 +1406,12 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
struct sock_msg_q *save_msg;
int len;
if (sk->sk_shutdown & RCV_SHUTDOWN)
if (sk->sk_shutdown & RCV_SHUTDOWN) {
iucv_message_reject(path, msg);
return;
}
spin_lock(&iucv->message_q.lock);
if (!list_empty(&iucv->message_q.list) ||
!skb_queue_empty(&iucv->backlog_skb_q))
@@ -1414,9 +1426,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
if (!skb)
goto save_message;
spin_lock(&iucv->message_q.lock);
iucv_process_message(sk, skb, path, msg);
spin_unlock(&iucv->message_q.lock);
goto out_unlock;
return;
@@ -1427,8 +1438,9 @@ save_message:
save_msg->path = path;
save_msg->msg = *msg;
spin_lock(&iucv->message_q.lock);
list_add_tail(&save_msg->list, &iucv->message_q.list);
out_unlock:
spin_unlock(&iucv->message_q.lock);
}
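
The af_iucv hunks above tighten iucv_callback_rx: messages are rejected
outright when RCV_SHUTDOWN is set, and message_q.lock is now taken before
the backlog is tested, so the emptiness test and the enqueue/deliver
decision form one critical section. A small sketch of the check-then-act
race that scope change closes (a pthread mutex stands in for the kernel
spinlock; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int backlog_len;     /* stands in for message_q.list + backlog_skb_q */

static void rx_callback(int shutdown)
{
	if (shutdown)                       /* reject before touching the queue */
		return;

	/* Test and act under one lock: testing first and locking later
	 * lets a concurrent callback interleave and deliver out of order. */
	pthread_mutex_lock(&q_lock);
	if (backlog_len == 0)
		puts("deliver directly");   /* process under the lock */
	else
		backlog_len++;              /* order behind the backlog */
	pthread_mutex_unlock(&q_lock);
}

int main(void)
{
	rx_callback(0);
	return 0;
}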


@@ -156,8 +156,19 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
WARN_ON(ieee80211_if_config(sdata, changed));
ieee80211_bss_info_change_notify(sdata, ~0);
/*
* Driver's config_interface can fail if rfkill is
* enabled. Accommodate this return code.
* FIXME: When mac80211 has knowledge of rfkill
* state the code below can change back to:
* WARN(ieee80211_if_config(sdata, changed));
* ieee80211_bss_info_change_notify(sdata, ~0);
*/
if (ieee80211_if_config(sdata, changed))
printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
sdata->dev->name);
else
ieee80211_bss_info_change_notify(sdata, ~0);
break;
case NL80211_IFTYPE_WDS:
break;


@@ -1397,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* mac80211. That also explains the __skb_push()
* below.
*/
align = (unsigned long)skb->data & 4;
align = (unsigned long)skb->data & 3;
if (align) {
if (WARN_ON(skb_headroom(skb) < 3)) {
dev_kfree_skb(skb);


@@ -988,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
char *helpname;
char *helpname = NULL;
int err;
/* don't change helper of sibling connections */
@@ -1231,7 +1231,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
rcu_read_lock();
if (cda[CTA_HELP]) {
char *helpname;
char *helpname = NULL;
err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
if (err < 0)


@@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
{
struct netlbl_af4list *entry;
entry = netlbl_af4list_search(addr, head);
if (entry != NULL && entry->addr == addr && entry->mask == mask) {
netlbl_af4list_remove_entry(entry);
return entry;
}
return NULL;
entry = netlbl_af4list_search_exact(addr, mask, head);
if (entry == NULL)
return NULL;
netlbl_af4list_remove_entry(entry);
return entry;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
{
struct netlbl_af6list *entry;
entry = netlbl_af6list_search(addr, head);
if (entry != NULL &&
ipv6_addr_equal(&entry->addr, addr) &&
ipv6_addr_equal(&entry->mask, mask)) {
netlbl_af6list_remove_entry(entry);
return entry;
}
return NULL;
entry = netlbl_af6list_search_exact(addr, mask, head);
if (entry == NULL)
return NULL;
netlbl_af6list_remove_entry(entry);
return entry;
}
#endif /* IPv6 */
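
Both removal paths above now delegate to a *_search_exact() helper that
only matches when address and mask are both equal, replacing the old
search-then-compare sequence. A minimal sketch of exact-match removal on
a singly linked list, assuming netlbl_af4list_search_exact() compares
(addr, mask) pairs this way (types and names here are illustrative):

#include <stdio.h>
#include <stddef.h>

struct af4_entry {
	unsigned int addr, mask;        /* both must match for removal */
	struct af4_entry *next;
};

/* Exact-match removal: a broader entry that merely covers the address
 * is never unlinked, because the masks differ. */
static struct af4_entry *remove_exact(unsigned int addr, unsigned int mask,
				      struct af4_entry **head)
{
	struct af4_entry **p;

	for (p = head; *p != NULL; p = &(*p)->next)
		if ((*p)->addr == addr && (*p)->mask == mask) {
			struct af4_entry *e = *p;
			*p = e->next;           /* unlink */
			return e;
		}
	return NULL;                            /* no exact match */
}

int main(void)
{
	struct af4_entry wide = { 0x0a000000, 0xff000000, NULL };
	struct af4_entry *head = &wide;

	/* a /24 lookup must miss even though the /8 covers the address */
	printf("%p\n", (void *)remove_exact(0x0a000000, 0xffffff00, &head));
	return 0;
}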


@@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
if (len > 65536)
return -EMSGSIZE;
if (len > 65536) {
err = -EMSGSIZE;
goto out;
}
SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
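
nr_sendmsg() already holds the socket lock at this point, so the oversize
check must leave through the function's common exit rather than returning
directly; the old return -EMSGSIZE leaked the lock. A self-contained
sketch of the single-exit idiom the fix restores (lock_sock/release_sock
are stubbed stand-ins for the kernel calls):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

static void lock_sock(void)    { puts("lock"); }    /* stand-ins for the */
static void release_sock(void) { puts("unlock"); }  /* kernel socket lock */

/* Once the lock is held, every failure path jumps to 'out' so the
 * unlock always runs, no matter which check fails. */
static int sendmsg_sketch(size_t len)
{
	int err = 0;

	lock_sock();
	if (len > 65536) {
		err = -EMSGSIZE;    /* was: return -EMSGSIZE, lock never dropped */
		goto out;
	}
	/* ... build and queue the NET/ROM frame ... */
out:
	release_sock();
	return err;
}

int main(void)
{
	return sendmsg_sketch(100000) == -EMSGSIZE ? 0 : 1;
}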